/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/swap.h>
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/rmap.h>
#include <linux/acct.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
int core_uses_pid;
char core_pattern[65] = "core";
/* The maximal length of core_pattern is also specified in sysctl.c */

static struct linux_binfmt *formats;
static DEFINE_RWLOCK(binfmt_lock);
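
/*
 * register_binfmt()/unregister_binfmt() maintain the single "formats"
 * list of binary format handlers.  The list is protected by binfmt_lock
 * and is walked by sys_uselib() and search_binary_handler().
 */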
int register_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	if (!fmt)
		return -EINVAL;
	if (fmt->next)
		return -EBUSY;
	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			write_unlock(&binfmt_lock);
			return -EBUSY;
		}
		tmp = &(*tmp)->next;
	}
	fmt->next = formats;
	formats = fmt;
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(register_binfmt);

int unregister_binfmt(struct linux_binfmt * fmt)
{
	struct linux_binfmt ** tmp = &formats;

	write_lock(&binfmt_lock);
	while (*tmp) {
		if (fmt == *tmp) {
			*tmp = fmt->next;
			write_unlock(&binfmt_lock);
			return 0;
		}
		tmp = &(*tmp)->next;
	}
	write_unlock(&binfmt_lock);
	return -EINVAL;
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable
 * for security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
	struct file * file;
	struct nameidata nd;
	int error;

	nd.intent.open.flags = FMODE_READ;
	error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
	if (error)
		goto out;

	error = -EINVAL;
	if (!S_ISREG(nd.dentry->d_inode->i_mode))
		goto exit;

	error = permission(nd.dentry->d_inode, MAY_READ | MAY_EXEC, &nd);
	if (error)
		goto exit;

	file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	path_release(&nd);
	goto out;
}
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (++i > max)
				return -E2BIG;
			cond_resched();
		}
	}
	return i;
}
/*
 * 'copy_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
static int copy_strings(int argc, char __user * __user * argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, bprm->p))) {
			ret = -EFAULT;
			goto out;
		}

		if (bprm->p < len) {
			ret = -E2BIG;
			goto out;
		}

		bprm->p -= len;
		/* XXX: add architecture specific overflow check here. */
		pos = bprm->p;

		while (len > 0) {
			int i, new, err;
			int offset, bytes_to_copy;
			struct page *page;

			offset = pos % PAGE_SIZE;
			i = pos/PAGE_SIZE;
			page = bprm->page[i];
			new = 0;
			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				bprm->page[i] = page;
				if (!page) {
					ret = -ENOMEM;
					goto out;
				}
				new = 1;
			}

			if (page != kmapped_page) {
				if (kmapped_page)
					kunmap(kmapped_page);
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
			}
			if (new && offset)
				memset(kaddr, 0, offset);
			bytes_to_copy = PAGE_SIZE - offset;
			if (bytes_to_copy > len) {
				bytes_to_copy = len;
				if (new)
					memset(kaddr+offset+len, 0,
						PAGE_SIZE-offset-len);
			}
			err = copy_from_user(kaddr+offset, str, bytes_to_copy);
			if (err) {
				ret = -EFAULT;
				goto out;
			}

			pos += bytes_to_copy;
			str += bytes_to_copy;
			len -= bytes_to_copy;
		}
	}
	ret = 0;
out:
	if (kmapped_page)
		kunmap(kmapped_page);
	return ret;
}
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char ** argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}

EXPORT_SYMBOL(copy_strings_kernel);
#ifdef CONFIG_MMU

/*
 * This routine is used to map in a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 *
 * vma->vm_mm->mmap_sem is held for writing.
 */
void install_arg_page(struct vm_area_struct *vma,
			struct page *page, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t * pgd;
	pud_t * pud;
	pmd_t * pmd;
	pte_t * pte;

	if (unlikely(anon_vma_prepare(vma)))
		goto out_sig;

	flush_dcache_page(page);
	pgd = pgd_offset(mm, address);

	spin_lock(&mm->page_table_lock);
	pud = pud_alloc(mm, pgd, address);
	if (!pud)
		goto out;
	pmd = pmd_alloc(mm, pud, address);
	if (!pmd)
		goto out;
	pte = pte_alloc_map(mm, pmd, address);
	if (!pte)
		goto out;
	if (!pte_none(*pte)) {
		pte_unmap(pte);
		goto out;
	}
	inc_mm_counter(mm, rss);
	lru_cache_add_active(page);
	set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
					page, vma->vm_page_prot))));
	page_add_anon_rmap(page, vma, address);
	pte_unmap(pte);
	spin_unlock(&mm->page_table_lock);

	/* no need for flush_tlb */
	return;
out:
	spin_unlock(&mm->page_table_lock);
out_sig:
	__free_page(page);
	force_sig(SIGKILL, current);
}

#define EXTRA_STACK_VM_PAGES	20	/* random */
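
/*
 * setup_arg_pages() creates the stack VMA for the new image and moves the
 * pages holding the argument/environment strings (built up in bprm->page[])
 * into place at the top of that stack (or at the bottom, for
 * CONFIG_STACK_GROWSUP).
 */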
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	struct mm_struct *mm = current->mm;
	int i, ret;
	long arg_size;

#ifdef CONFIG_STACK_GROWSUP
	/* Move the argument and environment strings to the bottom of the
	 * stack space.
	 */
	int offset, j;
	char *to, *from;

	/* Start by shifting all the pages down */
	i = 0;
	for (j = 0; j < MAX_ARG_PAGES; j++) {
		struct page *page = bprm->page[j];
		if (!page)
			continue;
		bprm->page[i++] = page;
	}

	/* Now move them within their pages */
	offset = bprm->p % PAGE_SIZE;
	to = kmap(bprm->page[0]);
	for (j = 1; j < i; j++) {
		memmove(to, to + offset, PAGE_SIZE - offset);
		from = kmap(bprm->page[j]);
		memcpy(to + PAGE_SIZE - offset, from, offset);
		kunmap(bprm->page[j - 1]);
		to = from;
	}
	memmove(to, to + offset, PAGE_SIZE - offset);
	kunmap(bprm->page[j - 1]);

	/* Limit stack size to 1GB */
	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	/* Adjust bprm->p to point to the end of the strings. */
	bprm->p = stack_base + PAGE_SIZE * i - offset;

	mm->arg_start = stack_base;
	arg_size = i << PAGE_SHIFT;

	/* zero pages that were copied above */
	while (i < MAX_ARG_PAGES)
		bprm->page[i++] = NULL;
#else
	stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
	stack_base = PAGE_ALIGN(stack_base);
	bprm->p += stack_base;
	mm->arg_start = bprm->p;
	arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
#endif

	arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;

	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	if (security_vm_enough_memory(arg_size >> PAGE_SHIFT)) {
		kmem_cache_free(vm_area_cachep, mpnt);
		return -ENOMEM;
	}

	memset(mpnt, 0, sizeof(*mpnt));

	down_write(&mm->mmap_sem);
	{
		mpnt->vm_mm = mm;
#ifdef CONFIG_STACK_GROWSUP
		mpnt->vm_start = stack_base;
		mpnt->vm_end = stack_base + arg_size;
#else
		mpnt->vm_end = stack_top;
		mpnt->vm_start = mpnt->vm_end - arg_size;
#endif
		/* Adjust stack execute permissions; explicitly enable
		 * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
		 * and leave alone (arch default) otherwise. */
		if (unlikely(executable_stack == EXSTACK_ENABLE_X))
			mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
		else if (executable_stack == EXSTACK_DISABLE_X)
			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
		else
			mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_flags |= mm->def_flags;
		mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
		if ((ret = insert_vm_struct(mm, mpnt))) {
			up_write(&mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, mpnt);
			return ret;
		}
		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			install_arg_page(mpnt, page, stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&mm->mmap_sem);

	return 0;
}

EXPORT_SYMBOL(setup_arg_pages);
#define free_arg_pages(bprm) do { } while (0)

#else

static inline void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++) {
		if (bprm->page[i])
			__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

#endif /* CONFIG_MMU */
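
/*
 * open_exec() looks up and opens the file to be executed.  It must be a
 * regular file on a mount without MNT_NOEXEC, pass an execute permission
 * check and have at least one execute bit set; write access to it is then
 * denied (deny_write_access) while the exec is in progress.
 */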
struct file *open_exec(const char *name)
{
	struct nameidata nd;
	int err;
	struct file *file;

	nd.intent.open.flags = FMODE_READ;
	err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd);
	file = ERR_PTR(err);

	if (!err) {
		struct inode *inode = nd.dentry->d_inode;
		file = ERR_PTR(-EACCES);
		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
		    S_ISREG(inode->i_mode)) {
			int err = permission(inode, MAY_EXEC, &nd);
			if (!err && !(inode->i_mode & 0111))
				err = -EACCES;
			file = ERR_PTR(err);
			if (!err) {
				file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
				if (!IS_ERR(file)) {
					err = deny_write_access(file);
					if (err) {
						fput(file);
						file = ERR_PTR(err);
					}
				}
out:
				return file;
			}
		}
		path_release(&nd);
	}
	goto out;
}

EXPORT_SYMBOL(open_exec);
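
/*
 * kernel_read() reads from an already-open file into a kernel buffer,
 * temporarily widening the address limit so that vfs_read() accepts a
 * kernel pointer.
 */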
int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
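
/*
 * exec_mmap() switches the task over to the mm built for the new image and
 * drops the reference on (or frees) the old one.  It bails out with -EINTR
 * if a core dump is already in progress on the old mm.
 */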
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_waiters and changing tsk->mm.  The
		 * core-inducing thread will increment core_waiters for
		 * each thread whose ->mm == old_mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_waiters)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		if (active_mm != old_mm) BUG();
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static inline int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	int count;

	/*
	 * If we don't share sighandlers, then we aren't sharing anything
	 * and we can just re-use it all.
	 */
	if (atomic_read(&oldsighand->count) <= 1) {
		BUG_ON(atomic_read(&sig->count) != 1);
		exit_itimers(sig);
		return 0;
	}

	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	if (!newsighand)
		return -ENOMEM;

	if (thread_group_empty(current))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 * We must hold tasklist_lock to call zap_other_threads.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irq(lock);
	if (sig->flags & SIGNAL_GROUP_EXIT) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		read_unlock(&tasklist_lock);
		kmem_cache_free(sighand_cachep, newsighand);
		return -EAGAIN;
	}
	zap_other_threads(current);
	read_unlock(&tasklist_lock);

	/*
	 * Account for the thread group leader hanging around:
	 */
	count = 2;
	if (thread_group_leader(current))
		count = 1;
	while (atomic_read(&sig->count) > count) {
		sig->group_exit_task = current;
		sig->notify_count = count;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	sig->group_exit_task = NULL;
	sig->notify_count = 0;
	sig->real_timer.data = (unsigned long)current;
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(current)) {
		struct task_struct *leader = current->group_leader, *parent;
		struct dentry *proc_dentry1, *proc_dentry2;
		unsigned long exit_state, ptrace;

		/*
		 * Wait for the thread group leader to be a zombie.
		 * It should already be zombie at this point, most
		 * of the time.
		 */
		while (leader->exit_state != EXIT_ZOMBIE)
			yield();

		spin_lock(&leader->proc_lock);
		spin_lock(&current->proc_lock);
		proc_dentry1 = proc_pid_unhash(current);
		proc_dentry2 = proc_pid_unhash(leader);
		write_lock_irq(&tasklist_lock);

		BUG_ON(leader->tgid != current->tgid);
		BUG_ON(current->pid == current->tgid);
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */
		ptrace = leader->ptrace;
		parent = leader->parent;
		if (unlikely(ptrace) && unlikely(parent == current)) {
			/*
			 * Joker was ptracing his own group leader,
			 * and now he wants to be his own parent!
			 * We can't have that.
			 */
			ptrace = 0;
		}

		ptrace_unlink(current);
		ptrace_unlink(leader);
		remove_parent(current);
		remove_parent(leader);

		switch_exec_pids(leader, current);

		current->parent = current->real_parent = leader->real_parent;
		leader->parent = leader->real_parent = child_reaper;
		current->group_leader = current;
		leader->group_leader = leader;

		add_parent(current, current->parent);
		add_parent(leader, leader->parent);
		if (ptrace) {
			current->ptrace = ptrace;
			__ptrace_link(current, parent);
		}

		list_del(&current->tasks);
		list_add_tail(&current->tasks, &init_task.tasks);
		current->exit_signal = SIGCHLD;
		exit_state = leader->exit_state;

		write_unlock_irq(&tasklist_lock);
		spin_unlock(&leader->proc_lock);
		spin_unlock(&current->proc_lock);
		proc_pid_flush(proc_dentry1);
		proc_pid_flush(proc_dentry2);

		BUG_ON(exit_state != EXIT_ZOMBIE);
		release_task(leader);
	}

	/*
	 * Now there are really no other threads at all,
	 * so it's safe to stop telling them to kill themselves.
	 */
	sig->flags = 0;

no_thread_group:
	BUG_ON(atomic_read(&sig->count) != 1);
	exit_itimers(sig);

	if (atomic_read(&oldsighand->count) == 1) {
		/*
		 * Now that we nuked the rest of the thread group,
		 * it turns out we are not sharing sighand any more either.
		 * So we can just keep it.
		 */
		kmem_cache_free(sighand_cachep, newsighand);
	} else {
		/*
		 * Move our state over to newsighand and switch it in.
		 */
		spin_lock_init(&newsighand->siglock);
		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		spin_lock(&newsighand->siglock);

		current->sighand = newsighand;
		recalc_sigpending();

		spin_unlock(&newsighand->siglock);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		if (atomic_dec_and_test(&oldsighand->count))
			kmem_cache_free(sighand_cachep, oldsighand);
	}

	BUG_ON(!thread_group_empty(current));
	BUG_ON(!thread_group_leader(current));
	return 0;
}
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */
static inline void flush_old_files(struct files_struct * files)
{
	long j = -1;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		if (i >= files->max_fds || i >= files->max_fdset)
			break;
		set = files->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		files->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1) {
				sys_close(i);
			}
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}

void get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
}
int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	struct files_struct *files;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	/*
	 * Make sure we have private file handles. Ask the
	 * fork helper to do the work for us and the exit
	 * helper to do the cleanup of the old one.
	 */
	files = current->files;		/* refcounted so safe to hold */
	retval = unshare_files();
	if (retval)
		goto out;
	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto mmap_failed;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	steal_locks(files);
	put_files_struct(files);

	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current->euid == current->uid && current->egid == current->gid)
		current->mm->dumpable = 1;
	name = bprm->filename;
	/* Copy the binary name from after the last slash */
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	current->flags &= ~PF_RANDOMIZE;
	flush_thread();

	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
	    permission(bprm->file->f_dentry->d_inode, MAY_READ, NULL) ||
	    (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
		suid_keys(current);
		current->mm->dumpable = 0;
	}

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;

mmap_failed:
	put_files_struct(current->files);
	current->files = files;
out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	struct inode * inode = bprm->file->f_dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	/*
	 * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
	 * generic_permission lets a non-executable through
	 */
	if (!(mode & 0111))	/* with at least _one_ execute bit set */
		return -EACCES;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;

	if (!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_uid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_gid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set(bprm);
	if (retval)
		return retval;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
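
/*
 * unsafe_exec() reports conditions (being ptraced, or sharing fs, files or
 * signal handlers with another task) under which honouring setuid/setgid
 * semantics would be unsafe; the result is handed to the security module.
 */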
static inline int unsafe_exec(struct task_struct *p)
{
	int unsafe = 0;
	if (p->ptrace & PT_PTRACED) {
		if (p->ptrace & PT_PTRACE_CAP)
			unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			unsafe |= LSM_UNSAFE_PTRACE;
	}
	if (atomic_read(&p->fs->count) > 1 ||
	    atomic_read(&p->files->count) > 1 ||
	    atomic_read(&p->sighand->count) > 1)
		unsafe |= LSM_UNSAFE_SHARE;

	return unsafe;
}
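
/*
 * compute_creds() switches the task's keyrings for the new image and asks
 * the security module to apply the credentials recorded in bprm, taking
 * the unsafe_exec() flags into account.
 */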
void compute_creds(struct linux_binprm *bprm)
{
	int unsafe;

	if (bprm->e_uid != current->uid)
		suid_keys(current);
	exec_keys(current);

	task_lock(current);
	unsafe = unsafe_exec(current);
	security_bprm_apply_creds(bprm, unsafe);
	task_unlock(current);
	security_bprm_post_apply_creds(bprm);
}

EXPORT_SYMBOL(compute_creds);
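
/*
 * remove_arg_zero() strips the original argv[0] string out of the argument
 * pages; binfmt handlers use it before substituting the interpreter's own
 * name and arguments.
 */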
void remove_arg_zero(struct linux_binprm *bprm)
{
	if (bprm->argc) {
		unsigned long offset;
		char * kaddr;
		struct page *page;

		offset = bprm->p % PAGE_SIZE;
		goto inside;

		while (bprm->p++, *(kaddr+offset++)) {
			if (offset != PAGE_SIZE)
				continue;
			offset = 0;
			kunmap_atomic(kaddr, KM_USER0);
inside:
			page = bprm->page[bprm->p/PAGE_SIZE];
			kaddr = kmap_atomic(page, KM_USER0);
		}
		kunmap_atomic(kaddr, KM_USER0);
		bprm->argc--;
	}
}

EXPORT_SYMBOL(remove_arg_zero);
/*
 * cycle through the list of binary format handlers, until one recognizes
 * the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int try, retval;
	struct linux_binfmt *fmt;
#ifdef __alpha__
	/* handle /sbin/loader.. */
	{
		struct exec * eh = (struct exec *) bprm->buf;

		if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		    (eh->fh.f_flags & 0x3000) == 0x3000)
		{
			struct file * file;
			unsigned long loader;

			allow_write_access(bprm->file);
			fput(bprm->file);
			bprm->file = NULL;

			loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

			file = open_exec("/sbin/loader");
			retval = PTR_ERR(file);
			if (IS_ERR(file))
				return retval;

			/* Remember if the application is TASO.  */
			bprm->sh_bang = eh->ah.entry < 0x100000000UL;

			bprm->file = file;
			bprm->loader = loader;
			retval = prepare_binprm(bprm);
			if (retval < 0)
				return retval;
			/* should call search_binary_handler recursively here,
			   but it does not matter */
		}
	}
#endif
	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);
	retval = -ENOENT;
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			if (retval >= 0) {
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_KMOD
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);
/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	int retval;
	int i;

	retval = -ENOMEM;
	bprm = kmalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_ret;
	memset(bprm, 0, sizeof(*bprm));

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_kfree;

	sched_exec();

	bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;
	bprm->mm = mm_alloc();
	retval = -ENOMEM;
	if (!bprm->mm)
		goto out_file;

	retval = init_new_context(current, bprm->mm);
	if (retval < 0)
		goto out_mm;

	bprm->argc = count(argv, bprm->p / sizeof(void *));
	if ((retval = bprm->argc) < 0)
		goto out_mm;

	bprm->envc = count(envp, bprm->p / sizeof(void *));
	if ((retval = bprm->envc) < 0)
		goto out_mm;

	retval = security_bprm_alloc(bprm);
	if (retval)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	retval = search_binary_handler(bprm, regs);
	if (retval >= 0) {
		free_arg_pages(bprm);

		/* execve success */
		security_bprm_free(bprm);
		acct_update_integrals(current);
		update_mem_hiwater(current);
		kfree(bprm);
		return retval;
	}

out:
	/* Something went wrong, return the inode and free the argument pages */
	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page * page = bprm->page[i];
		if (page)
			__free_page(page);
	}

	if (bprm->security)
		security_bprm_free(bprm);

out_mm:
	if (bprm->mm)
		mmdrop(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_kfree:
	kfree(bprm);

out_ret:
	return retval;
}
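
/*
 * set_binfmt() records which binary format handler owns the current image,
 * pinning the new handler's module and releasing the old one's.
 */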
int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);
#define CORENAME_MAX_SIZE 64

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static void format_corename(char *corename, const char *pattern, long signr)
{
	const char *pat_ptr = pattern;
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->tgid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", system_utsname.nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename */
	if (!pid_in_pattern
	    && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", current->tgid);
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
}
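
/*
 * zap_threads() sends SIGKILL to every other thread that uses this mm so
 * the core dump sees a stable address space, counting each one in
 * mm->core_waiters.  Ptrace links inside the group are severed to avoid a
 * tracer/tracee deadlock while they exit.
 */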
static void zap_threads(struct mm_struct *mm)
{
	struct task_struct *g, *p;
	struct task_struct *tsk = current;
	struct completion *vfork_done = tsk->vfork_done;
	int traced = 0;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	read_lock(&tasklist_lock);
	do_each_thread(g,p)
		if (mm == p->mm && p != tsk) {
			force_sig_specific(SIGKILL, p);
			mm->core_waiters++;
			if (unlikely(p->ptrace) &&
			    unlikely(p->parent->mm == mm))
				traced = 1;
		}
	while_each_thread(g,p);

	read_unlock(&tasklist_lock);

	if (unlikely(traced)) {
		/*
		 * We are zapping a thread and the thread it ptraces.
		 * If the tracee went into a ptrace stop for exit tracing,
		 * we could deadlock since the tracer is waiting for this
		 * coredump to finish.  Detach them so they can both die.
		 */
		write_lock_irq(&tasklist_lock);
		do_each_thread(g,p) {
			if (mm == p->mm && p != tsk &&
			    p->ptrace && p->parent->mm == mm) {
				__ptrace_unlink(p);
			}
		} while_each_thread(g,p);
		write_unlock_irq(&tasklist_lock);
	}
}
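
/*
 * coredump_wait() is entered with mm->mmap_sem held for writing.  It kills
 * the other users of this mm via zap_threads() and waits until each of
 * them has accounted for itself in core_waiters, then returns with
 * mmap_sem released so the dump can proceed against a quiesced mm.
 */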
static void coredump_wait(struct mm_struct *mm)
{
	DECLARE_COMPLETION(startup_done);

	mm->core_waiters++; /* let other threads block */
	mm->core_startup_done = &startup_done;

	/* give other threads a chance to run: */
	yield();

	zap_threads(mm);
	if (--mm->core_waiters) {
		up_write(&mm->mmap_sem);
		wait_for_completion(&startup_done);
	} else
		up_write(&mm->mmap_sem);
	BUG_ON(mm->core_waiters);
}
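
/*
 * do_coredump() drives the actual core dump: it marks the mm non-dumpable,
 * stops the other threads, builds the core file name from core_pattern,
 * opens the file after a few sanity checks, and hands off to the binfmt's
 * core_dump() method.
 */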
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	int retval = 0;

	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	down_write(&mm->mmap_sem);
	if (!mm->dumpable) {
		up_write(&mm->mmap_sem);
		goto fail;
	}
	mm->dumpable = 0;
	init_completion(&mm->core_done);
	spin_lock_irq(&current->sighand->siglock);
	current->signal->flags = SIGNAL_GROUP_EXIT;
	current->signal->group_exit_code = exit_code;
	spin_unlock_irq(&current->sighand->siglock);
	coredump_wait(mm);

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	current->signal->group_stop_count = 0;
	clear_thread_flag(TIF_SIGPENDING);

	if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
		goto fail_unlock;

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl, which
	 * uses lock_kernel()
	 */
	lock_kernel();
	format_corename(corename, core_pattern, signr);
	unlock_kernel();
	file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE, 0600);
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (d_unhashed(file->f_dentry))
		goto close_fail;
	if (!S_ISREG(inode->i_mode))
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (do_truncate(file->f_dentry, 0) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	filp_close(file, NULL);
fail_unlock:
	complete_all(&mm->core_done);
fail:
	return retval;
}