  1. /*
  2. * linux/fs/exec.c
  3. *
  4. * Copyright (C) 1991, 1992 Linus Torvalds
  5. */
  6. /*
  7. * #!-checking implemented by tytso.
  8. */
  9. /*
  10. * Demand-loading implemented 01.12.91 - no need to read anything but
  11. * the header into memory. The inode of the executable is put into
  12. * "current->executable", and page faults do the actual loading. Clean.
  13. *
  14. * Once more I can proudly say that linux stood up to being changed: it
  15. * was less than 2 hours work to get demand-loading completely implemented.
  16. *
  17. * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
  18. * current->executable is only used by the procfs. This allows a dispatch
  19. * table to check for several different types of binary formats. We keep
  20. * trying until we recognize the file or we run out of supported binary
  21. * formats.
  22. */
  23. #include <linux/config.h>
  24. #include <linux/slab.h>
  25. #include <linux/file.h>
  26. #include <linux/mman.h>
  27. #include <linux/a.out.h>
  28. #include <linux/stat.h>
  29. #include <linux/fcntl.h>
  30. #include <linux/smp_lock.h>
  31. #include <linux/init.h>
  32. #include <linux/pagemap.h>
  33. #include <linux/highmem.h>
  34. #include <linux/spinlock.h>
  35. #include <linux/key.h>
  36. #include <linux/personality.h>
  37. #include <linux/binfmts.h>
  38. #include <linux/swap.h>
  39. #include <linux/utsname.h>
  40. #include <linux/module.h>
  41. #include <linux/namei.h>
  42. #include <linux/proc_fs.h>
  43. #include <linux/ptrace.h>
  44. #include <linux/mount.h>
  45. #include <linux/security.h>
  46. #include <linux/syscalls.h>
  47. #include <linux/rmap.h>
  48. #include <linux/acct.h>
  49. #include <asm/uaccess.h>
  50. #include <asm/mmu_context.h>
  51. #ifdef CONFIG_KMOD
  52. #include <linux/kmod.h>
  53. #endif
  54. int core_uses_pid;
  55. char core_pattern[65] = "core";
  56. int suid_dumpable = 0;
  57. EXPORT_SYMBOL(suid_dumpable);
  58. /* The maximal length of core_pattern is also specified in sysctl.c */
  59. static struct linux_binfmt *formats;
  60. static DEFINE_RWLOCK(binfmt_lock);
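/*
 * register_binfmt() adds a binary format handler to the singly-linked
 * "formats" list under binfmt_lock; unregister_binfmt() removes it again.
 * search_binary_handler() walks this list to find a handler that accepts
 * the image being exec'd.
 *
 * Illustrative sketch only (my_format and load_my_binary are hypothetical,
 * not part of this file): a binfmt module would typically do
 *
 *	static struct linux_binfmt my_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = load_my_binary,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return register_binfmt(&my_format);
 *	}
 */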
  61. int register_binfmt(struct linux_binfmt * fmt)
  62. {
  63. struct linux_binfmt ** tmp = &formats;
  64. if (!fmt)
  65. return -EINVAL;
  66. if (fmt->next)
  67. return -EBUSY;
  68. write_lock(&binfmt_lock);
  69. while (*tmp) {
  70. if (fmt == *tmp) {
  71. write_unlock(&binfmt_lock);
  72. return -EBUSY;
  73. }
  74. tmp = &(*tmp)->next;
  75. }
  76. fmt->next = formats;
  77. formats = fmt;
  78. write_unlock(&binfmt_lock);
  79. return 0;
  80. }
  81. EXPORT_SYMBOL(register_binfmt);
  82. int unregister_binfmt(struct linux_binfmt * fmt)
  83. {
  84. struct linux_binfmt ** tmp = &formats;
  85. write_lock(&binfmt_lock);
  86. while (*tmp) {
  87. if (fmt == *tmp) {
  88. *tmp = fmt->next;
  89. write_unlock(&binfmt_lock);
  90. return 0;
  91. }
  92. tmp = &(*tmp)->next;
  93. }
  94. write_unlock(&binfmt_lock);
  95. return -EINVAL;
  96. }
  97. EXPORT_SYMBOL(unregister_binfmt);
  98. static inline void put_binfmt(struct linux_binfmt * fmt)
  99. {
  100. module_put(fmt->module);
  101. }
/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that the address to load from is taken from the file itself.
 */
  108. asmlinkage long sys_uselib(const char __user * library)
  109. {
  110. struct file * file;
  111. struct nameidata nd;
  112. int error;
  113. nd.intent.open.flags = FMODE_READ;
  114. error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
  115. if (error)
  116. goto out;
  117. error = -EINVAL;
  118. if (!S_ISREG(nd.dentry->d_inode->i_mode))
  119. goto exit;
  120. error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC, &nd);
  121. if (error)
  122. goto exit;
  123. file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
  124. error = PTR_ERR(file);
  125. if (IS_ERR(file))
  126. goto out;
  127. error = -ENOEXEC;
  128. if(file->f_op) {
  129. struct linux_binfmt * fmt;
  130. read_lock(&binfmt_lock);
  131. for (fmt = formats ; fmt ; fmt = fmt->next) {
  132. if (!fmt->load_shlib)
  133. continue;
  134. if (!try_module_get(fmt->module))
  135. continue;
  136. read_unlock(&binfmt_lock);
  137. error = fmt->load_shlib(file);
  138. read_lock(&binfmt_lock);
  139. put_binfmt(fmt);
  140. if (error != -ENOEXEC)
  141. break;
  142. }
  143. read_unlock(&binfmt_lock);
  144. }
  145. fput(file);
  146. out:
  147. return error;
  148. exit:
  149. path_release(&nd);
  150. goto out;
  151. }
  152. /*
  153. * count() counts the number of strings in array ARGV.
  154. */
  155. static int count(char __user * __user * argv, int max)
  156. {
  157. int i = 0;
  158. if (argv != NULL) {
  159. for (;;) {
  160. char __user * p;
  161. if (get_user(p, argv))
  162. return -EFAULT;
  163. if (!p)
  164. break;
  165. argv++;
  166. if(++i > max)
  167. return -E2BIG;
  168. cond_resched();
  169. }
  170. }
  171. return i;
  172. }
  173. /*
  174. * 'copy_strings()' copies argument/environment strings from user
  175. * memory to free pages in kernel mem. These are in a format ready
  176. * to be put directly into the top of new user memory.
  177. */
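/*
 * Layout note: bprm->p starts at PAGE_SIZE*MAX_ARG_PAGES - sizeof(void *)
 * (set up in do_execve) and is decremented as each string is copied, so
 * the strings end up packed together at the top of the argument pages.
 */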
  178. static int copy_strings(int argc, char __user * __user * argv,
  179. struct linux_binprm *bprm)
  180. {
  181. struct page *kmapped_page = NULL;
  182. char *kaddr = NULL;
  183. int ret;
  184. while (argc-- > 0) {
  185. char __user *str;
  186. int len;
  187. unsigned long pos;
  188. if (get_user(str, argv+argc) ||
  189. !(len = strnlen_user(str, bprm->p))) {
  190. ret = -EFAULT;
  191. goto out;
  192. }
  193. if (bprm->p < len) {
  194. ret = -E2BIG;
  195. goto out;
  196. }
  197. bprm->p -= len;
  198. /* XXX: add architecture specific overflow check here. */
  199. pos = bprm->p;
  200. while (len > 0) {
  201. int i, new, err;
  202. int offset, bytes_to_copy;
  203. struct page *page;
  204. offset = pos % PAGE_SIZE;
  205. i = pos/PAGE_SIZE;
  206. page = bprm->page[i];
  207. new = 0;
  208. if (!page) {
  209. page = alloc_page(GFP_HIGHUSER);
  210. bprm->page[i] = page;
  211. if (!page) {
  212. ret = -ENOMEM;
  213. goto out;
  214. }
  215. new = 1;
  216. }
  217. if (page != kmapped_page) {
  218. if (kmapped_page)
  219. kunmap(kmapped_page);
  220. kmapped_page = page;
  221. kaddr = kmap(kmapped_page);
  222. }
  223. if (new && offset)
  224. memset(kaddr, 0, offset);
  225. bytes_to_copy = PAGE_SIZE - offset;
  226. if (bytes_to_copy > len) {
  227. bytes_to_copy = len;
  228. if (new)
  229. memset(kaddr+offset+len, 0,
  230. PAGE_SIZE-offset-len);
  231. }
  232. err = copy_from_user(kaddr+offset, str, bytes_to_copy);
  233. if (err) {
  234. ret = -EFAULT;
  235. goto out;
  236. }
  237. pos += bytes_to_copy;
  238. str += bytes_to_copy;
  239. len -= bytes_to_copy;
  240. }
  241. }
  242. ret = 0;
  243. out:
  244. if (kmapped_page)
  245. kunmap(kmapped_page);
  246. return ret;
  247. }
/*
 * Like copy_strings, but gets argv and its values from kernel memory.
 */
  251. int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
  252. {
  253. int r;
  254. mm_segment_t oldfs = get_fs();
  255. set_fs(KERNEL_DS);
  256. r = copy_strings(argc, (char __user * __user *)argv, bprm);
  257. set_fs(oldfs);
  258. return r;
  259. }
  260. EXPORT_SYMBOL(copy_strings_kernel);
  261. #ifdef CONFIG_MMU
/*
 * This routine is used to map a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 *
 * vma->vm_mm->mmap_sem is held for writing.
 */
  268. void install_arg_page(struct vm_area_struct *vma,
  269. struct page *page, unsigned long address)
  270. {
  271. struct mm_struct *mm = vma->vm_mm;
  272. pgd_t * pgd;
  273. pud_t * pud;
  274. pmd_t * pmd;
  275. pte_t * pte;
  276. if (unlikely(anon_vma_prepare(vma)))
  277. goto out_sig;
  278. flush_dcache_page(page);
  279. pgd = pgd_offset(mm, address);
  280. spin_lock(&mm->page_table_lock);
  281. pud = pud_alloc(mm, pgd, address);
  282. if (!pud)
  283. goto out;
  284. pmd = pmd_alloc(mm, pud, address);
  285. if (!pmd)
  286. goto out;
  287. pte = pte_alloc_map(mm, pmd, address);
  288. if (!pte)
  289. goto out;
  290. if (!pte_none(*pte)) {
  291. pte_unmap(pte);
  292. goto out;
  293. }
  294. inc_mm_counter(mm, rss);
  295. lru_cache_add_active(page);
  296. set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
  297. page, vma->vm_page_prot))));
  298. page_add_anon_rmap(page, vma, address);
  299. pte_unmap(pte);
  300. spin_unlock(&mm->page_table_lock);
  301. /* no need for flush_tlb */
  302. return;
  303. out:
  304. spin_unlock(&mm->page_table_lock);
  305. out_sig:
  306. __free_page(page);
  307. force_sig(SIGKILL, current);
  308. }
  309. #define EXTRA_STACK_VM_PAGES 20 /* random */
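/*
 * setup_arg_pages() creates the stack VMA for the new image and installs
 * the argument/environment pages (built up by copy_strings) into it at
 * their final addresses.  With CONFIG_STACK_GROWSUP the pages are first
 * shifted to the bottom of the stack area; otherwise they sit just below
 * stack_top.
 */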
  310. int setup_arg_pages(struct linux_binprm *bprm,
  311. unsigned long stack_top,
  312. int executable_stack)
  313. {
  314. unsigned long stack_base;
  315. struct vm_area_struct *mpnt;
  316. struct mm_struct *mm = current->mm;
  317. int i, ret;
  318. long arg_size;
  319. #ifdef CONFIG_STACK_GROWSUP
  320. /* Move the argument and environment strings to the bottom of the
  321. * stack space.
  322. */
  323. int offset, j;
  324. char *to, *from;
  325. /* Start by shifting all the pages down */
  326. i = 0;
  327. for (j = 0; j < MAX_ARG_PAGES; j++) {
  328. struct page *page = bprm->page[j];
  329. if (!page)
  330. continue;
  331. bprm->page[i++] = page;
  332. }
  333. /* Now move them within their pages */
  334. offset = bprm->p % PAGE_SIZE;
  335. to = kmap(bprm->page[0]);
  336. for (j = 1; j < i; j++) {
  337. memmove(to, to + offset, PAGE_SIZE - offset);
  338. from = kmap(bprm->page[j]);
  339. memcpy(to + PAGE_SIZE - offset, from, offset);
  340. kunmap(bprm->page[j - 1]);
  341. to = from;
  342. }
  343. memmove(to, to + offset, PAGE_SIZE - offset);
  344. kunmap(bprm->page[j - 1]);
  345. /* Limit stack size to 1GB */
  346. stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
  347. if (stack_base > (1 << 30))
  348. stack_base = 1 << 30;
  349. stack_base = PAGE_ALIGN(stack_top - stack_base);
  350. /* Adjust bprm->p to point to the end of the strings. */
  351. bprm->p = stack_base + PAGE_SIZE * i - offset;
  352. mm->arg_start = stack_base;
  353. arg_size = i << PAGE_SHIFT;
  354. /* zero pages that were copied above */
  355. while (i < MAX_ARG_PAGES)
  356. bprm->page[i++] = NULL;
  357. #else
  358. stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
  359. stack_base = PAGE_ALIGN(stack_base);
  360. bprm->p += stack_base;
  361. mm->arg_start = bprm->p;
  362. arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
  363. #endif
  364. arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;
  365. if (bprm->loader)
  366. bprm->loader += stack_base;
  367. bprm->exec += stack_base;
  368. mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  369. if (!mpnt)
  370. return -ENOMEM;
  371. if (security_vm_enough_memory(arg_size >> PAGE_SHIFT)) {
  372. kmem_cache_free(vm_area_cachep, mpnt);
  373. return -ENOMEM;
  374. }
  375. memset(mpnt, 0, sizeof(*mpnt));
  376. down_write(&mm->mmap_sem);
  377. {
  378. mpnt->vm_mm = mm;
  379. #ifdef CONFIG_STACK_GROWSUP
  380. mpnt->vm_start = stack_base;
  381. mpnt->vm_end = stack_base + arg_size;
  382. #else
  383. mpnt->vm_end = stack_top;
  384. mpnt->vm_start = mpnt->vm_end - arg_size;
  385. #endif
  386. /* Adjust stack execute permissions; explicitly enable
  387. * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
  388. * and leave alone (arch default) otherwise. */
  389. if (unlikely(executable_stack == EXSTACK_ENABLE_X))
  390. mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
  391. else if (executable_stack == EXSTACK_DISABLE_X)
  392. mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
  393. else
  394. mpnt->vm_flags = VM_STACK_FLAGS;
  395. mpnt->vm_flags |= mm->def_flags;
  396. mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
  397. if ((ret = insert_vm_struct(mm, mpnt))) {
  398. up_write(&mm->mmap_sem);
  399. kmem_cache_free(vm_area_cachep, mpnt);
  400. return ret;
  401. }
  402. mm->stack_vm = mm->total_vm = vma_pages(mpnt);
  403. }
  404. for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
  405. struct page *page = bprm->page[i];
  406. if (page) {
  407. bprm->page[i] = NULL;
  408. install_arg_page(mpnt, page, stack_base);
  409. }
  410. stack_base += PAGE_SIZE;
  411. }
  412. up_write(&mm->mmap_sem);
  413. return 0;
  414. }
  415. EXPORT_SYMBOL(setup_arg_pages);
  416. #define free_arg_pages(bprm) do { } while (0)
  417. #else
  418. static inline void free_arg_pages(struct linux_binprm *bprm)
  419. {
  420. int i;
  421. for (i = 0; i < MAX_ARG_PAGES; i++) {
  422. if (bprm->page[i])
  423. __free_page(bprm->page[i]);
  424. bprm->page[i] = NULL;
  425. }
  426. }
  427. #endif /* CONFIG_MMU */
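/*
 * open_exec() looks up and opens a file for execution: it must be a
 * regular file on a mount without MNT_NOEXEC, with execute permission,
 * and write access to it is denied for the duration of the exec.
 */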
  428. struct file *open_exec(const char *name)
  429. {
  430. struct nameidata nd;
  431. int err;
  432. struct file *file;
  433. nd.intent.open.flags = FMODE_READ;
  434. err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
  435. file = ERR_PTR(err);
  436. if (!err) {
  437. struct inode *inode = nd.dentry->d_inode;
  438. file = ERR_PTR(-EACCES);
  439. if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
  440. S_ISREG(inode->i_mode)) {
  441. int err = permission(inode, MAY_EXEC, &nd);
  442. if (!err && !(inode->i_mode & 0111))
  443. err = -EACCES;
  444. file = ERR_PTR(err);
  445. if (!err) {
  446. file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
  447. if (!IS_ERR(file)) {
  448. err = deny_write_access(file);
  449. if (err) {
  450. fput(file);
  451. file = ERR_PTR(err);
  452. }
  453. }
  454. out:
  455. return file;
  456. }
  457. }
  458. path_release(&nd);
  459. }
  460. goto out;
  461. }
  462. EXPORT_SYMBOL(open_exec);
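/*
 * kernel_read() reads from a file into a kernel buffer by temporarily
 * switching the address limit with set_fs(), so that vfs_read() accepts
 * the kernel pointer.
 */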
  463. int kernel_read(struct file *file, unsigned long offset,
  464. char *addr, unsigned long count)
  465. {
  466. mm_segment_t old_fs;
  467. loff_t pos = offset;
  468. int result;
  469. old_fs = get_fs();
  470. set_fs(get_ds());
  471. /* The cast to a user pointer is valid due to the set_fs() */
  472. result = vfs_read(file, (void __user *)addr, count, &pos);
  473. set_fs(old_fs);
  474. return result;
  475. }
  476. EXPORT_SYMBOL(kernel_read);
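/*
 * exec_mmap() switches the current task over to the new mm built for the
 * exec, releasing the old one (or just dropping the borrowed active_mm if
 * the task had no mm, e.g. a kernel thread doing an exec).
 */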
  477. static int exec_mmap(struct mm_struct *mm)
  478. {
  479. struct task_struct *tsk;
  480. struct mm_struct * old_mm, *active_mm;
  481. /* Notify parent that we're no longer interested in the old VM */
  482. tsk = current;
  483. old_mm = current->mm;
  484. mm_release(tsk, old_mm);
  485. if (old_mm) {
  486. /*
  487. * Make sure that if there is a core dump in progress
  488. * for the old mm, we get out and die instead of going
  489. * through with the exec. We must hold mmap_sem around
  490. * checking core_waiters and changing tsk->mm. The
  491. * core-inducing thread will increment core_waiters for
  492. * each thread whose ->mm == old_mm.
  493. */
  494. down_read(&old_mm->mmap_sem);
  495. if (unlikely(old_mm->core_waiters)) {
  496. up_read(&old_mm->mmap_sem);
  497. return -EINTR;
  498. }
  499. }
  500. task_lock(tsk);
  501. active_mm = tsk->active_mm;
  502. tsk->mm = mm;
  503. tsk->active_mm = mm;
  504. activate_mm(active_mm, mm);
  505. task_unlock(tsk);
  506. arch_pick_mmap_layout(mm);
  507. if (old_mm) {
  508. up_read(&old_mm->mmap_sem);
  509. if (active_mm != old_mm) BUG();
  510. mmput(old_mm);
  511. return 0;
  512. }
  513. mmdrop(active_mm);
  514. return 0;
  515. }
  516. /*
  517. * This function makes sure the current process has its own signal table,
  518. * so that flush_signal_handlers can later reset the handlers without
  519. * disturbing other processes. (Other processes might share the signal
  520. * table via the CLONE_SIGHAND option to clone().)
  521. */
  522. static inline int de_thread(struct task_struct *tsk)
  523. {
  524. struct signal_struct *sig = tsk->signal;
  525. struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
  526. spinlock_t *lock = &oldsighand->siglock;
  527. int count;
  528. /*
  529. * If we don't share sighandlers, then we aren't sharing anything
  530. * and we can just re-use it all.
  531. */
  532. if (atomic_read(&oldsighand->count) <= 1) {
  533. BUG_ON(atomic_read(&sig->count) != 1);
  534. exit_itimers(sig);
  535. return 0;
  536. }
  537. newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
  538. if (!newsighand)
  539. return -ENOMEM;
  540. if (thread_group_empty(current))
  541. goto no_thread_group;
  542. /*
  543. * Kill all other threads in the thread group.
  544. * We must hold tasklist_lock to call zap_other_threads.
  545. */
  546. read_lock(&tasklist_lock);
  547. spin_lock_irq(lock);
  548. if (sig->flags & SIGNAL_GROUP_EXIT) {
  549. /*
  550. * Another group action in progress, just
  551. * return so that the signal is processed.
  552. */
  553. spin_unlock_irq(lock);
  554. read_unlock(&tasklist_lock);
  555. kmem_cache_free(sighand_cachep, newsighand);
  556. return -EAGAIN;
  557. }
  558. zap_other_threads(current);
  559. read_unlock(&tasklist_lock);
  560. /*
  561. * Account for the thread group leader hanging around:
  562. */
  563. count = 2;
  564. if (thread_group_leader(current))
  565. count = 1;
  566. else {
  567. /*
  568. * The SIGALRM timer survives the exec, but needs to point
  569. * at us as the new group leader now. We have a race with
  570. * a timer firing now getting the old leader, so we need to
  571. * synchronize with any firing (by calling del_timer_sync)
  572. * before we can safely let the old group leader die.
  573. */
  574. sig->real_timer.data = (unsigned long)current;
  575. if (del_timer_sync(&sig->real_timer))
  576. add_timer(&sig->real_timer);
  577. }
  578. while (atomic_read(&sig->count) > count) {
  579. sig->group_exit_task = current;
  580. sig->notify_count = count;
  581. __set_current_state(TASK_UNINTERRUPTIBLE);
  582. spin_unlock_irq(lock);
  583. schedule();
  584. spin_lock_irq(lock);
  585. }
  586. sig->group_exit_task = NULL;
  587. sig->notify_count = 0;
  588. sig->real_timer.data = (unsigned long)current;
  589. spin_unlock_irq(lock);
  590. /*
  591. * At this point all other threads have exited, all we have to
  592. * do is to wait for the thread group leader to become inactive,
  593. * and to assume its PID:
  594. */
  595. if (!thread_group_leader(current)) {
  596. struct task_struct *leader = current->group_leader, *parent;
  597. struct dentry *proc_dentry1, *proc_dentry2;
  598. unsigned long exit_state, ptrace;
  599. /*
  600. * Wait for the thread group leader to be a zombie.
  601. * It should already be zombie at this point, most
  602. * of the time.
  603. */
  604. while (leader->exit_state != EXIT_ZOMBIE)
  605. yield();
  606. spin_lock(&leader->proc_lock);
  607. spin_lock(&current->proc_lock);
  608. proc_dentry1 = proc_pid_unhash(current);
  609. proc_dentry2 = proc_pid_unhash(leader);
  610. write_lock_irq(&tasklist_lock);
  611. BUG_ON(leader->tgid != current->tgid);
  612. BUG_ON(current->pid == current->tgid);
  613. /*
  614. * An exec() starts a new thread group with the
  615. * TGID of the previous thread group. Rehash the
  616. * two threads with a switched PID, and release
  617. * the former thread group leader:
  618. */
  619. ptrace = leader->ptrace;
  620. parent = leader->parent;
  621. if (unlikely(ptrace) && unlikely(parent == current)) {
  622. /*
  623. * Joker was ptracing his own group leader,
  624. * and now he wants to be his own parent!
  625. * We can't have that.
  626. */
  627. ptrace = 0;
  628. }
  629. ptrace_unlink(current);
  630. ptrace_unlink(leader);
  631. remove_parent(current);
  632. remove_parent(leader);
  633. switch_exec_pids(leader, current);
  634. current->parent = current->real_parent = leader->real_parent;
  635. leader->parent = leader->real_parent = child_reaper;
  636. current->group_leader = current;
  637. leader->group_leader = leader;
  638. add_parent(current, current->parent);
  639. add_parent(leader, leader->parent);
  640. if (ptrace) {
  641. current->ptrace = ptrace;
  642. __ptrace_link(current, parent);
  643. }
  644. list_del(&current->tasks);
  645. list_add_tail(&current->tasks, &init_task.tasks);
  646. current->exit_signal = SIGCHLD;
  647. exit_state = leader->exit_state;
  648. write_unlock_irq(&tasklist_lock);
  649. spin_unlock(&leader->proc_lock);
  650. spin_unlock(&current->proc_lock);
  651. proc_pid_flush(proc_dentry1);
  652. proc_pid_flush(proc_dentry2);
  653. BUG_ON(exit_state != EXIT_ZOMBIE);
  654. release_task(leader);
  655. }
  656. /*
  657. * There may be one thread left which is just exiting,
  658. * but it's safe to stop telling the group to kill themselves.
  659. */
  660. sig->flags = 0;
  661. no_thread_group:
  662. BUG_ON(atomic_read(&sig->count) != 1);
  663. exit_itimers(sig);
  664. if (atomic_read(&oldsighand->count) == 1) {
  665. /*
  666. * Now that we nuked the rest of the thread group,
  667. * it turns out we are not sharing sighand any more either.
  668. * So we can just keep it.
  669. */
  670. kmem_cache_free(sighand_cachep, newsighand);
  671. } else {
  672. /*
  673. * Move our state over to newsighand and switch it in.
  674. */
  675. spin_lock_init(&newsighand->siglock);
  676. atomic_set(&newsighand->count, 1);
  677. memcpy(newsighand->action, oldsighand->action,
  678. sizeof(newsighand->action));
  679. write_lock_irq(&tasklist_lock);
  680. spin_lock(&oldsighand->siglock);
  681. spin_lock(&newsighand->siglock);
  682. current->sighand = newsighand;
  683. recalc_sigpending();
  684. spin_unlock(&newsighand->siglock);
  685. spin_unlock(&oldsighand->siglock);
  686. write_unlock_irq(&tasklist_lock);
  687. if (atomic_dec_and_test(&oldsighand->count))
  688. kmem_cache_free(sighand_cachep, oldsighand);
  689. }
  690. BUG_ON(!thread_group_leader(current));
  691. return 0;
  692. }
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */
  697. static inline void flush_old_files(struct files_struct * files)
  698. {
  699. long j = -1;
  700. struct fdtable *fdt;
  701. spin_lock(&files->file_lock);
  702. for (;;) {
  703. unsigned long set, i;
  704. j++;
  705. i = j * __NFDBITS;
  706. fdt = files_fdtable(files);
  707. if (i >= fdt->max_fds || i >= fdt->max_fdset)
  708. break;
  709. set = fdt->close_on_exec->fds_bits[j];
  710. if (!set)
  711. continue;
  712. fdt->close_on_exec->fds_bits[j] = 0;
  713. spin_unlock(&files->file_lock);
  714. for ( ; set ; i++,set >>= 1) {
  715. if (set & 1) {
  716. sys_close(i);
  717. }
  718. }
  719. spin_lock(&files->file_lock);
  720. }
  721. spin_unlock(&files->file_lock);
  722. }
  723. void get_task_comm(char *buf, struct task_struct *tsk)
  724. {
  725. /* buf must be at least sizeof(tsk->comm) in size */
  726. task_lock(tsk);
  727. strncpy(buf, tsk->comm, sizeof(tsk->comm));
  728. task_unlock(tsk);
  729. }
  730. void set_task_comm(struct task_struct *tsk, char *buf)
  731. {
  732. task_lock(tsk);
  733. strlcpy(tsk->comm, buf, sizeof(tsk->comm));
  734. task_unlock(tsk);
  735. }
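/*
 * flush_old_exec() is the point of no return for an exec: it unshares the
 * signal handlers and file table, installs the new mm, resets the task
 * name and dumpability, and closes close-on-exec files.
 */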
  736. int flush_old_exec(struct linux_binprm * bprm)
  737. {
  738. char * name;
  739. int i, ch, retval;
  740. struct files_struct *files;
  741. char tcomm[sizeof(current->comm)];
  742. /*
  743. * Make sure we have a private signal table and that
  744. * we are unassociated from the previous thread group.
  745. */
  746. retval = de_thread(current);
  747. if (retval)
  748. goto out;
  749. /*
  750. * Make sure we have private file handles. Ask the
  751. * fork helper to do the work for us and the exit
  752. * helper to do the cleanup of the old one.
  753. */
  754. files = current->files; /* refcounted so safe to hold */
  755. retval = unshare_files();
  756. if (retval)
  757. goto out;
  758. /*
  759. * Release all of the old mmap stuff
  760. */
  761. retval = exec_mmap(bprm->mm);
  762. if (retval)
  763. goto mmap_failed;
  764. bprm->mm = NULL; /* We're using it now */
  765. /* This is the point of no return */
  766. steal_locks(files);
  767. put_files_struct(files);
  768. current->sas_ss_sp = current->sas_ss_size = 0;
  769. if (current->euid == current->uid && current->egid == current->gid)
  770. current->mm->dumpable = 1;
  771. else
  772. current->mm->dumpable = suid_dumpable;
  773. name = bprm->filename;
	/* Copy the binary name, starting after the last slash */
  775. for (i=0; (ch = *(name++)) != '\0';) {
  776. if (ch == '/')
  777. i = 0; /* overwrite what we wrote */
  778. else
  779. if (i < (sizeof(tcomm) - 1))
  780. tcomm[i++] = ch;
  781. }
  782. tcomm[i] = '\0';
  783. set_task_comm(current, tcomm);
  784. current->flags &= ~PF_RANDOMIZE;
  785. flush_thread();
  786. if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
  787. permission(bprm->file->f_dentry->d_inode,MAY_READ, NULL) ||
  788. (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
  789. suid_keys(current);
  790. current->mm->dumpable = suid_dumpable;
  791. }
  792. /* An exec changes our domain. We are no longer part of the thread
  793. group */
  794. current->self_exec_id++;
  795. flush_signal_handlers(current, 0);
  796. flush_old_files(current->files);
  797. return 0;
  798. mmap_failed:
  799. put_files_struct(current->files);
  800. current->files = files;
  801. out:
  802. return retval;
  803. }
  804. EXPORT_SYMBOL(flush_old_exec);
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes.
 */
  809. int prepare_binprm(struct linux_binprm *bprm)
  810. {
  811. int mode;
  812. struct inode * inode = bprm->file->f_dentry->d_inode;
  813. int retval;
  814. mode = inode->i_mode;
  815. /*
  816. * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
  817. * generic_permission lets a non-executable through
  818. */
  819. if (!(mode & 0111)) /* with at least _one_ execute bit set */
  820. return -EACCES;
  821. if (bprm->file->f_op == NULL)
  822. return -EACCES;
  823. bprm->e_uid = current->euid;
  824. bprm->e_gid = current->egid;
  825. if(!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
  826. /* Set-uid? */
  827. if (mode & S_ISUID) {
  828. current->personality &= ~PER_CLEAR_ON_SETID;
  829. bprm->e_uid = inode->i_uid;
  830. }
  831. /* Set-gid? */
  832. /*
  833. * If setgid is set but no group execute bit then this
  834. * is a candidate for mandatory locking, not a setgid
  835. * executable.
  836. */
  837. if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
  838. current->personality &= ~PER_CLEAR_ON_SETID;
  839. bprm->e_gid = inode->i_gid;
  840. }
  841. }
  842. /* fill in binprm security blob */
  843. retval = security_bprm_set(bprm);
  844. if (retval)
  845. return retval;
  846. memset(bprm->buf,0,BINPRM_BUF_SIZE);
  847. return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE);
  848. }
  849. EXPORT_SYMBOL(prepare_binprm);
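/*
 * unsafe_exec() reports LSM_UNSAFE_* flags describing conditions (being
 * ptraced, or sharing fs/files/sighand with another task) under which it
 * is not safe to grant extra privileges across this exec.
 */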
  850. static inline int unsafe_exec(struct task_struct *p)
  851. {
  852. int unsafe = 0;
  853. if (p->ptrace & PT_PTRACED) {
  854. if (p->ptrace & PT_PTRACE_CAP)
  855. unsafe |= LSM_UNSAFE_PTRACE_CAP;
  856. else
  857. unsafe |= LSM_UNSAFE_PTRACE;
  858. }
  859. if (atomic_read(&p->fs->count) > 1 ||
  860. atomic_read(&p->files->count) > 1 ||
  861. atomic_read(&p->sighand->count) > 1)
  862. unsafe |= LSM_UNSAFE_SHARE;
  863. return unsafe;
  864. }
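/*
 * compute_creds() lets the security module apply the credential changes
 * (e.g. setuid/setgid) computed in prepare_binprm(), taking the result of
 * unsafe_exec() into account.
 */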
  865. void compute_creds(struct linux_binprm *bprm)
  866. {
  867. int unsafe;
  868. if (bprm->e_uid != current->uid)
  869. suid_keys(current);
  870. exec_keys(current);
  871. task_lock(current);
  872. unsafe = unsafe_exec(current);
  873. security_bprm_apply_creds(bprm, unsafe);
  874. task_unlock(current);
  875. security_bprm_post_apply_creds(bprm);
  876. }
  877. EXPORT_SYMBOL(compute_creds);
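/*
 * remove_arg_zero() strips the first argument (argv[0]) from the pages
 * set up by copy_strings, advancing bprm->p past its terminating NUL.
 * Interpreters such as binfmt_script use this before substituting their
 * own argv[0].
 */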
  878. void remove_arg_zero(struct linux_binprm *bprm)
  879. {
  880. if (bprm->argc) {
  881. unsigned long offset;
  882. char * kaddr;
  883. struct page *page;
  884. offset = bprm->p % PAGE_SIZE;
  885. goto inside;
  886. while (bprm->p++, *(kaddr+offset++)) {
  887. if (offset != PAGE_SIZE)
  888. continue;
  889. offset = 0;
  890. kunmap_atomic(kaddr, KM_USER0);
  891. inside:
  892. page = bprm->page[bprm->p/PAGE_SIZE];
  893. kaddr = kmap_atomic(page, KM_USER0);
  894. }
  895. kunmap_atomic(kaddr, KM_USER0);
  896. bprm->argc--;
  897. }
  898. }
  899. EXPORT_SYMBOL(remove_arg_zero);
/*
 * Cycle through the list of binary format handlers until one recognizes
 * the image.
 */
  903. int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
  904. {
  905. int try,retval;
  906. struct linux_binfmt *fmt;
  907. #ifdef __alpha__
  908. /* handle /sbin/loader.. */
  909. {
  910. struct exec * eh = (struct exec *) bprm->buf;
  911. if (!bprm->loader && eh->fh.f_magic == 0x183 &&
  912. (eh->fh.f_flags & 0x3000) == 0x3000)
  913. {
  914. struct file * file;
  915. unsigned long loader;
  916. allow_write_access(bprm->file);
  917. fput(bprm->file);
  918. bprm->file = NULL;
  919. loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
  920. file = open_exec("/sbin/loader");
  921. retval = PTR_ERR(file);
  922. if (IS_ERR(file))
  923. return retval;
  924. /* Remember if the application is TASO. */
  925. bprm->sh_bang = eh->ah.entry < 0x100000000UL;
  926. bprm->file = file;
  927. bprm->loader = loader;
  928. retval = prepare_binprm(bprm);
  929. if (retval<0)
  930. return retval;
  931. /* should call search_binary_handler recursively here,
  932. but it does not matter */
  933. }
  934. }
  935. #endif
  936. retval = security_bprm_check(bprm);
  937. if (retval)
  938. return retval;
	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
  941. set_fs(USER_DS);
  942. retval = -ENOENT;
  943. for (try=0; try<2; try++) {
  944. read_lock(&binfmt_lock);
  945. for (fmt = formats ; fmt ; fmt = fmt->next) {
  946. int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
  947. if (!fn)
  948. continue;
  949. if (!try_module_get(fmt->module))
  950. continue;
  951. read_unlock(&binfmt_lock);
  952. retval = fn(bprm, regs);
  953. if (retval >= 0) {
  954. put_binfmt(fmt);
  955. allow_write_access(bprm->file);
  956. if (bprm->file)
  957. fput(bprm->file);
  958. bprm->file = NULL;
  959. current->did_exec = 1;
  960. return retval;
  961. }
  962. read_lock(&binfmt_lock);
  963. put_binfmt(fmt);
  964. if (retval != -ENOEXEC || bprm->mm == NULL)
  965. break;
  966. if (!bprm->file) {
  967. read_unlock(&binfmt_lock);
  968. return retval;
  969. }
  970. }
  971. read_unlock(&binfmt_lock);
  972. if (retval != -ENOEXEC || bprm->mm == NULL) {
  973. break;
  974. #ifdef CONFIG_KMOD
  975. }else{
  976. #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
  977. if (printable(bprm->buf[0]) &&
  978. printable(bprm->buf[1]) &&
  979. printable(bprm->buf[2]) &&
  980. printable(bprm->buf[3]))
  981. break; /* -ENOEXEC */
  982. request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
  983. #endif
  984. }
  985. }
  986. return retval;
  987. }
  988. EXPORT_SYMBOL(search_binary_handler);
  989. /*
  990. * sys_execve() executes a new program.
  991. */
  992. int do_execve(char * filename,
  993. char __user *__user *argv,
  994. char __user *__user *envp,
  995. struct pt_regs * regs)
  996. {
  997. struct linux_binprm *bprm;
  998. struct file *file;
  999. int retval;
  1000. int i;
  1001. retval = -ENOMEM;
  1002. bprm = kmalloc(sizeof(*bprm), GFP_KERNEL);
  1003. if (!bprm)
  1004. goto out_ret;
  1005. memset(bprm, 0, sizeof(*bprm));
  1006. file = open_exec(filename);
  1007. retval = PTR_ERR(file);
  1008. if (IS_ERR(file))
  1009. goto out_kfree;
  1010. sched_exec();
  1011. bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
  1012. bprm->file = file;
  1013. bprm->filename = filename;
  1014. bprm->interp = filename;
  1015. bprm->mm = mm_alloc();
  1016. retval = -ENOMEM;
  1017. if (!bprm->mm)
  1018. goto out_file;
  1019. retval = init_new_context(current, bprm->mm);
  1020. if (retval < 0)
  1021. goto out_mm;
  1022. bprm->argc = count(argv, bprm->p / sizeof(void *));
  1023. if ((retval = bprm->argc) < 0)
  1024. goto out_mm;
  1025. bprm->envc = count(envp, bprm->p / sizeof(void *));
  1026. if ((retval = bprm->envc) < 0)
  1027. goto out_mm;
  1028. retval = security_bprm_alloc(bprm);
  1029. if (retval)
  1030. goto out;
  1031. retval = prepare_binprm(bprm);
  1032. if (retval < 0)
  1033. goto out;
  1034. retval = copy_strings_kernel(1, &bprm->filename, bprm);
  1035. if (retval < 0)
  1036. goto out;
  1037. bprm->exec = bprm->p;
  1038. retval = copy_strings(bprm->envc, envp, bprm);
  1039. if (retval < 0)
  1040. goto out;
  1041. retval = copy_strings(bprm->argc, argv, bprm);
  1042. if (retval < 0)
  1043. goto out;
  1044. retval = search_binary_handler(bprm,regs);
  1045. if (retval >= 0) {
  1046. free_arg_pages(bprm);
  1047. /* execve success */
  1048. security_bprm_free(bprm);
  1049. acct_update_integrals(current);
  1050. update_mem_hiwater(current);
  1051. kfree(bprm);
  1052. return retval;
  1053. }
  1054. out:
	/* Something went wrong, return the inode and free the argument pages */
  1056. for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
  1057. struct page * page = bprm->page[i];
  1058. if (page)
  1059. __free_page(page);
  1060. }
  1061. if (bprm->security)
  1062. security_bprm_free(bprm);
  1063. out_mm:
  1064. if (bprm->mm)
  1065. mmdrop(bprm->mm);
  1066. out_file:
  1067. if (bprm->file) {
  1068. allow_write_access(bprm->file);
  1069. fput(bprm->file);
  1070. }
  1071. out_kfree:
  1072. kfree(bprm);
  1073. out_ret:
  1074. return retval;
  1075. }
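/*
 * set_binfmt() records which binary format handler owns the current
 * image, taking a module reference on the new handler and dropping the
 * reference held on the old one.
 */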
  1076. int set_binfmt(struct linux_binfmt *new)
  1077. {
  1078. struct linux_binfmt *old = current->binfmt;
  1079. if (new) {
  1080. if (!try_module_get(new->module))
  1081. return -1;
  1082. }
  1083. current->binfmt = new;
  1084. if (old)
  1085. module_put(old->module);
  1086. return 0;
  1087. }
  1088. EXPORT_SYMBOL(set_binfmt);
  1089. #define CORENAME_MAX_SIZE 64
  1090. /* format_corename will inspect the pattern parameter, and output a
  1091. * name into corename, which must have space for at least
  1092. * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
  1093. */
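/*
 * Illustrative example: with core_pattern set to "core.%e.%p", a dump of
 * a process named "bash" with tgid 1234 is written to "core.bash.1234".
 */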
  1094. static void format_corename(char *corename, const char *pattern, long signr)
  1095. {
  1096. const char *pat_ptr = pattern;
  1097. char *out_ptr = corename;
  1098. char *const out_end = corename + CORENAME_MAX_SIZE;
  1099. int rc;
  1100. int pid_in_pattern = 0;
  1101. /* Repeat as long as we have more pattern to process and more output
  1102. space */
  1103. while (*pat_ptr) {
  1104. if (*pat_ptr != '%') {
  1105. if (out_ptr == out_end)
  1106. goto out;
  1107. *out_ptr++ = *pat_ptr++;
  1108. } else {
  1109. switch (*++pat_ptr) {
  1110. case 0:
  1111. goto out;
  1112. /* Double percent, output one percent */
  1113. case '%':
  1114. if (out_ptr == out_end)
  1115. goto out;
  1116. *out_ptr++ = '%';
  1117. break;
  1118. /* pid */
  1119. case 'p':
  1120. pid_in_pattern = 1;
  1121. rc = snprintf(out_ptr, out_end - out_ptr,
  1122. "%d", current->tgid);
  1123. if (rc > out_end - out_ptr)
  1124. goto out;
  1125. out_ptr += rc;
  1126. break;
  1127. /* uid */
  1128. case 'u':
  1129. rc = snprintf(out_ptr, out_end - out_ptr,
  1130. "%d", current->uid);
  1131. if (rc > out_end - out_ptr)
  1132. goto out;
  1133. out_ptr += rc;
  1134. break;
  1135. /* gid */
  1136. case 'g':
  1137. rc = snprintf(out_ptr, out_end - out_ptr,
  1138. "%d", current->gid);
  1139. if (rc > out_end - out_ptr)
  1140. goto out;
  1141. out_ptr += rc;
  1142. break;
  1143. /* signal that caused the coredump */
  1144. case 's':
  1145. rc = snprintf(out_ptr, out_end - out_ptr,
  1146. "%ld", signr);
  1147. if (rc > out_end - out_ptr)
  1148. goto out;
  1149. out_ptr += rc;
  1150. break;
  1151. /* UNIX time of coredump */
  1152. case 't': {
  1153. struct timeval tv;
  1154. do_gettimeofday(&tv);
  1155. rc = snprintf(out_ptr, out_end - out_ptr,
  1156. "%lu", tv.tv_sec);
  1157. if (rc > out_end - out_ptr)
  1158. goto out;
  1159. out_ptr += rc;
  1160. break;
  1161. }
  1162. /* hostname */
  1163. case 'h':
  1164. down_read(&uts_sem);
  1165. rc = snprintf(out_ptr, out_end - out_ptr,
  1166. "%s", system_utsname.nodename);
  1167. up_read(&uts_sem);
  1168. if (rc > out_end - out_ptr)
  1169. goto out;
  1170. out_ptr += rc;
  1171. break;
  1172. /* executable */
  1173. case 'e':
  1174. rc = snprintf(out_ptr, out_end - out_ptr,
  1175. "%s", current->comm);
  1176. if (rc > out_end - out_ptr)
  1177. goto out;
  1178. out_ptr += rc;
  1179. break;
  1180. default:
  1181. break;
  1182. }
  1183. ++pat_ptr;
  1184. }
  1185. }
  1186. /* Backward compatibility with core_uses_pid:
  1187. *
  1188. * If core_pattern does not include a %p (as is the default)
  1189. * and core_uses_pid is set, then .%pid will be appended to
  1190. * the filename */
  1191. if (!pid_in_pattern
  1192. && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
  1193. rc = snprintf(out_ptr, out_end - out_ptr,
  1194. ".%d", current->tgid);
  1195. if (rc > out_end - out_ptr)
  1196. goto out;
  1197. out_ptr += rc;
  1198. }
  1199. out:
  1200. *out_ptr = 0;
  1201. }
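/*
 * zap_threads() sends SIGKILL to every other thread using this mm so the
 * core dump sees a stable address space.  If one of those threads is
 * being ptraced by another thread of the same mm, the pair is detached so
 * that neither blocks the dump (see the exit-tracing comment below).
 */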
  1202. static void zap_threads (struct mm_struct *mm)
  1203. {
  1204. struct task_struct *g, *p;
  1205. struct task_struct *tsk = current;
  1206. struct completion *vfork_done = tsk->vfork_done;
  1207. int traced = 0;
  1208. /*
  1209. * Make sure nobody is waiting for us to release the VM,
  1210. * otherwise we can deadlock when we wait on each other
  1211. */
  1212. if (vfork_done) {
  1213. tsk->vfork_done = NULL;
  1214. complete(vfork_done);
  1215. }
  1216. read_lock(&tasklist_lock);
  1217. do_each_thread(g,p)
  1218. if (mm == p->mm && p != tsk) {
  1219. force_sig_specific(SIGKILL, p);
  1220. mm->core_waiters++;
  1221. if (unlikely(p->ptrace) &&
  1222. unlikely(p->parent->mm == mm))
  1223. traced = 1;
  1224. }
  1225. while_each_thread(g,p);
  1226. read_unlock(&tasklist_lock);
  1227. if (unlikely(traced)) {
  1228. /*
  1229. * We are zapping a thread and the thread it ptraces.
  1230. * If the tracee went into a ptrace stop for exit tracing,
  1231. * we could deadlock since the tracer is waiting for this
  1232. * coredump to finish. Detach them so they can both die.
  1233. */
  1234. write_lock_irq(&tasklist_lock);
  1235. do_each_thread(g,p) {
  1236. if (mm == p->mm && p != tsk &&
  1237. p->ptrace && p->parent->mm == mm) {
  1238. __ptrace_unlink(p);
  1239. }
  1240. } while_each_thread(g,p);
  1241. write_unlock_irq(&tasklist_lock);
  1242. }
  1243. }
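/*
 * coredump_wait() blocks the dumping thread until all other threads that
 * share this mm have stopped using it; the last of them completes
 * core_startup_done (from the exit path) to wake the dumper.
 */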
  1244. static void coredump_wait(struct mm_struct *mm)
  1245. {
  1246. DECLARE_COMPLETION(startup_done);
  1247. mm->core_waiters++; /* let other threads block */
  1248. mm->core_startup_done = &startup_done;
  1249. /* give other threads a chance to run: */
  1250. yield();
  1251. zap_threads(mm);
  1252. if (--mm->core_waiters) {
  1253. up_write(&mm->mmap_sem);
  1254. wait_for_completion(&startup_done);
  1255. } else
  1256. up_write(&mm->mmap_sem);
  1257. BUG_ON(mm->core_waiters);
  1258. }
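/*
 * do_coredump() is called from the signal code when a fatal signal should
 * produce a core file: it checks dumpability and RLIMIT_CORE, builds the
 * file name with format_corename(), opens the file carefully (no symlink
 * following, no multiple links) and hands it to the binfmt's core_dump
 * method.
 */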
  1259. int do_coredump(long signr, int exit_code, struct pt_regs * regs)
  1260. {
  1261. char corename[CORENAME_MAX_SIZE + 1];
  1262. struct mm_struct *mm = current->mm;
  1263. struct linux_binfmt * binfmt;
  1264. struct inode * inode;
  1265. struct file * file;
  1266. int retval = 0;
  1267. int fsuid = current->fsuid;
  1268. int flag = 0;
  1269. binfmt = current->binfmt;
  1270. if (!binfmt || !binfmt->core_dump)
  1271. goto fail;
  1272. down_write(&mm->mmap_sem);
  1273. if (!mm->dumpable) {
  1274. up_write(&mm->mmap_sem);
  1275. goto fail;
  1276. }
  1277. /*
  1278. * We cannot trust fsuid as being the "true" uid of the
  1279. * process nor do we know its entire history. We only know it
  1280. * was tainted so we dump it as root in mode 2.
  1281. */
  1282. if (mm->dumpable == 2) { /* Setuid core dump mode */
  1283. flag = O_EXCL; /* Stop rewrite attacks */
  1284. current->fsuid = 0; /* Dump root private */
  1285. }
  1286. mm->dumpable = 0;
  1287. init_completion(&mm->core_done);
  1288. spin_lock_irq(&current->sighand->siglock);
  1289. current->signal->flags = SIGNAL_GROUP_EXIT;
  1290. current->signal->group_exit_code = exit_code;
  1291. spin_unlock_irq(&current->sighand->siglock);
  1292. coredump_wait(mm);
  1293. /*
  1294. * Clear any false indication of pending signals that might
  1295. * be seen by the filesystem code called to write the core file.
  1296. */
  1297. current->signal->group_stop_count = 0;
  1298. clear_thread_flag(TIF_SIGPENDING);
  1299. if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
  1300. goto fail_unlock;
  1301. /*
  1302. * lock_kernel() because format_corename() is controlled by sysctl, which
  1303. * uses lock_kernel()
  1304. */
  1305. lock_kernel();
  1306. format_corename(corename, core_pattern, signr);
  1307. unlock_kernel();
  1308. file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, 0600);
  1309. if (IS_ERR(file))
  1310. goto fail_unlock;
  1311. inode = file->f_dentry->d_inode;
  1312. if (inode->i_nlink > 1)
  1313. goto close_fail; /* multiple links - don't dump */
  1314. if (d_unhashed(file->f_dentry))
  1315. goto close_fail;
  1316. if (!S_ISREG(inode->i_mode))
  1317. goto close_fail;
  1318. if (!file->f_op)
  1319. goto close_fail;
  1320. if (!file->f_op->write)
  1321. goto close_fail;
  1322. if (do_truncate(file->f_dentry, 0) != 0)
  1323. goto close_fail;
  1324. retval = binfmt->core_dump(signr, regs, file);
  1325. if (retval)
  1326. current->signal->group_exit_code |= 0x80;
  1327. close_fail:
  1328. filp_close(file, NULL);
  1329. fail_unlock:
  1330. current->fsuid = fsuid;
  1331. complete_all(&mm->core_done);
  1332. fail:
  1333. return retval;
  1334. }