/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */

/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/swap.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/rmap.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
int suid_dumpable = 0;

EXPORT_SYMBOL(suid_dumpable);
/* The maximal length of core_pattern is also specified in sysctl.c */

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
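/*
 * Handlers on the "formats" list are walked under read_lock(&binfmt_lock);
 * a walker pins fmt->module with try_module_get() before dropping the lock
 * to call into the handler, so a format cannot be unloaded mid-call.
 */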
int register_binfmt(struct linux_binfmt * fmt)
{
	if (!fmt)
		return -EINVAL;
	write_lock(&binfmt_lock);
	list_add(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
/*
 * Note that a shared library must be both readable and executable
 * for security reasons.
 *
 * Also note that the address to load from is taken from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
	struct file * file;
	struct nameidata nd;
	int error;

	error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
	if (error)
		goto out;

	error = -EACCES;
	if (nd.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;
	error = -EINVAL;
	if (!S_ISREG(nd.dentry->d_inode->i_mode))
		goto exit;

	error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
	if (error)
		goto exit;

	file = nameidata_to_filp(&nd, O_RDONLY);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	release_open_intent(&nd);
	path_release(&nd);
	goto out;
}
#ifdef CONFIG_MMU

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_stack_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		struct rlimit *rlim = current->signal->rlim;
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
		if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}
static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err = -ENOMEM;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto err;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;

	vma->vm_flags = VM_STACK_FLAGS;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
	err = insert_vm_struct(mm, vma);
	if (err) {
		up_write(&mm->mmap_sem);
		goto err;
	}

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);

	bprm->p = vma->vm_end - sizeof(void *);

	return 0;

err:
	if (vma) {
		bprm->vma = NULL;
		kmem_cache_free(vm_area_cachep, vma);
	}

	return err;
}
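/*
 * At this point bprm->p sits just below the top of the temporary stack;
 * copy_strings() fills the argument/environment area downwards from here.
 */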
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */
/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}
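/*
 * The mm allocated above is not attached to any task yet: it becomes
 * current->mm only when exec_mmap() installs it, and the error paths of
 * do_execve() release it with mmput().
 */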
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (++i > max)
				return -E2BIG;
			cond_resched();
		}
	}
	return i;
}
/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack. The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, char __user * __user * argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, MAX_ARG_STRLEN))) {
			ret = -EFAULT;
			goto out;
		}

		if (!valid_arg_len(bprm, len)) {
			ret = -E2BIG;
			goto out;
		}

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}
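/*
 * Note that copy_strings() copies the last argument first and walks each
 * string from its end towards its start, mapping one destination page at a
 * time with kmap() and flushing it before moving on to the next page.
 */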
/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char **argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}

EXPORT_SYMBOL(copy_strings_kernel);
#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather *tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length))
		return -ENOMEM;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start), because some
		 * architectures (IA64) have constraints on va-space that
		 * make this illegal; for the others it's just a little
		 * faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	}
	tlb_finish_mmu(tlb, new_end, old_end);

	/*
	 * shrink the vma to just the new range.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}
#define EXTRA_STACK_VM_PAGES	20	/* random */

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);
	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = vma->vm_flags;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret) {
			up_write(&mm->mmap_sem);
			return ret;
		}
	}

#ifdef CONFIG_STACK_GROWSUP
	stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#else
	stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#endif
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#endif /* CONFIG_MMU */
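/*
 * open_exec() resolves and opens an executable image. It refuses no-exec
 * mounts and non-regular files, and takes deny_write_access() on success
 * so the file cannot be modified while it is being loaded;
 * allow_write_access() undoes this when the file is released.
 */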
struct file *open_exec(const char *name)
{
	struct nameidata nd;
	int err;
	struct file *file;

	err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
	file = ERR_PTR(err);

	if (!err) {
		struct inode *inode = nd.dentry->d_inode;
		file = ERR_PTR(-EACCES);
		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
		    S_ISREG(inode->i_mode)) {
			int err = vfs_permission(&nd, MAY_EXEC);
			file = ERR_PTR(err);
			if (!err) {
				file = nameidata_to_filp(&nd, O_RDONLY);
				if (!IS_ERR(file)) {
					err = deny_write_access(file);
					if (err) {
						fput(file);
						file = ERR_PTR(err);
					}
				}
out:
				return file;
			}
		}
		release_open_intent(&nd);
		path_release(&nd);
	}
	goto out;
}

EXPORT_SYMBOL(open_exec);
int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec. We must hold mmap_sem around
		 * checking core_waiters and changing tsk->mm. The
		 * core-inducing thread will increment core_waiters for
		 * each thread whose ->mm == old_mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_waiters)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
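/*
 * Note the asymmetry in the cleanup above: a real old mm is released with
 * mmput(), while a borrowed active_mm (the lazy-TLB kernel-thread case,
 * where old_mm is NULL) only has its reference dropped with mmdrop().
 */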
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	struct task_struct *leader = NULL;
	int count;

	/*
	 * If we don't share sighandlers, then we aren't sharing anything
	 * and we can just re-use it all.
	 */
	if (atomic_read(&oldsighand->count) <= 1) {
		exit_itimers(sig);
		return 0;
	}

	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	if (!newsighand)
		return -ENOMEM;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 * We must hold tasklist_lock to call zap_other_threads.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irq(lock);
	if (sig->flags & SIGNAL_GROUP_EXIT) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		read_unlock(&tasklist_lock);
		kmem_cache_free(sighand_cachep, newsighand);
		return -EAGAIN;
	}

	/*
	 * child_reaper ignores SIGKILL, change it now.
	 * Reparenting needs write_lock on tasklist_lock,
	 * so it is safe to do it under read_lock.
	 */
	if (unlikely(tsk->group_leader == child_reaper(tsk)))
		tsk->nsproxy->pid_ns->child_reaper = tsk;

	zap_other_threads(tsk);
	read_unlock(&tasklist_lock);

	/*
	 * Account for the thread group leader hanging around:
	 */
	count = 1;
	if (!thread_group_leader(tsk)) {
		count = 2;
		/*
		 * The SIGALRM timer survives the exec, but needs to point
		 * at us as the new group leader now. We have a race with
		 * a timer firing now getting the old leader, so we need to
		 * synchronize with any firing (by calling del_timer_sync)
		 * before we can safely let the old group leader die.
		 */
		sig->tsk = tsk;
		spin_unlock_irq(lock);
		if (hrtimer_cancel(&sig->real_timer))
			hrtimer_restart(&sig->real_timer);
		spin_lock_irq(lock);
	}

	while (atomic_read(&sig->count) > count) {
		sig->group_exit_task = tsk;
		sig->notify_count = count;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	sig->group_exit_task = NULL;
	sig->notify_count = 0;
	spin_unlock_irq(lock);
	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		/*
		 * Wait for the thread group leader to be a zombie.
		 * It should already be zombie at this point, most
		 * of the time.
		 */
		leader = tsk->group_leader;
		while (leader->exit_state != EXIT_ZOMBIE)
			yield();

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead. But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		write_lock_irq(&tasklist_lock);

		BUG_ON(leader->tgid != tsk->tgid);
		BUG_ON(tsk->pid == tsk->tgid);
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 *
		 * We become a process group leader with the old leader's
		 * pid, and the old leader becomes a thread of this thread
		 * group. Note: the old leader also uses this pid until
		 * release_task is called. Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, find_pid(tsk->pid));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);
		list_replace_rcu(&leader->tasks, &tsk->tasks);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		write_unlock_irq(&tasklist_lock);
	}
	/*
	 * There may be one thread left which is just exiting,
	 * but it's safe to stop telling the group to kill themselves.
	 */
	sig->flags = 0;

no_thread_group:
	exit_itimers(sig);
	if (leader)
		release_task(leader);

	if (atomic_read(&oldsighand->count) == 1) {
		/*
		 * Now that we nuked the rest of the thread group,
		 * it turns out we are not sharing sighand any more either.
		 * So we can just keep it.
		 */
		kmem_cache_free(sighand_cachep, newsighand);
	} else {
		/*
		 * Move our state over to newsighand and switch it in.
		 */
		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);

		rcu_assign_pointer(tsk->sighand, newsighand);
		recalc_sigpending();

		spin_unlock(&newsighand->siglock);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}
/*
 * This function flushes out all traces of the currently running executable
 * so that a new one can be started.
 */
static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		fdt->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1) {
				sys_close(i);
			}
		}
		spin_lock(&files->file_lock);
	}
	spin_unlock(&files->file_lock);
}
void get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
}
int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	struct files_struct *files;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	/*
	 * Make sure we have private file handles. Ask the
	 * fork helper to do the work for us and the exit
	 * helper to do the cleanup of the old one.
	 */
	files = current->files;		/* refcounted so safe to hold */
	retval = unshare_files();
	if (retval)
		goto out;

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto mmap_failed;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	put_files_struct(files);

	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current->euid == current->uid && current->egid == current->gid)
		set_dumpable(current->mm, 1);
	else
		set_dumpable(current->mm, suid_dumpable);

	name = bprm->filename;

	/* Copies the binary name from after last slash */
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	current->flags &= ~PF_RANDOMIZE;
	flush_thread();

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
		suid_keys(current);
		set_dumpable(current->mm, suid_dumpable);
		current->pdeath_signal = 0;
	} else if (file_permission(bprm->file, MAY_READ) ||
			(bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
		suid_keys(current);
		set_dumpable(current->mm, suid_dumpable);
	}

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;

mmap_failed:
	reset_files_struct(current, files);
out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);
/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	int mode;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_uid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			current->personality &= ~PER_CLEAR_ON_SETID;
			bprm->e_gid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set(bprm);
	if (retval)
		return retval;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
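/*
 * unsafe_exec() computes the LSM_UNSAFE_* mask handed to the security
 * module: the exec is considered unsafe when the task is being ptraced,
 * or when it still shares fs, files or signal-handler state with another
 * task.
 */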
static int unsafe_exec(struct task_struct *p)
{
	int unsafe = 0;

	if (p->ptrace & PT_PTRACED) {
		if (p->ptrace & PT_PTRACE_CAP)
			unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			unsafe |= LSM_UNSAFE_PTRACE;
	}
	if (atomic_read(&p->fs->count) > 1 ||
	    atomic_read(&p->files->count) > 1 ||
	    atomic_read(&p->sighand->count) > 1)
		unsafe |= LSM_UNSAFE_SHARE;

	return unsafe;
}

void compute_creds(struct linux_binprm *bprm)
{
	int unsafe;

	if (bprm->e_uid != current->uid) {
		suid_keys(current);
		current->pdeath_signal = 0;
	}
	exec_keys(current);

	task_lock(current);
	unsafe = unsafe_exec(current);
	security_bprm_apply_creds(bprm, unsafe);
	task_unlock(current);
	security_bprm_post_apply_creds(bprm);
}

EXPORT_SYMBOL(compute_creds);
/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page, KM_USER0);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr, KM_USER0);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
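/*
 * remove_arg_zero() is used by interpreter loaders such as binfmt_script,
 * which drop the original argv[0] before copying in the interpreter's own
 * name and arguments.
 */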
/*
 * cycle through the list of binary format handlers, until one recognizes
 * the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int try, retval;
	struct linux_binfmt *fmt;
#ifdef __alpha__
	/* handle /sbin/loader.. */
	{
		struct exec * eh = (struct exec *) bprm->buf;

		if (!bprm->loader && eh->fh.f_magic == 0x183 &&
		    (eh->fh.f_flags & 0x3000) == 0x3000)
		{
			struct file * file;
			unsigned long loader;

			allow_write_access(bprm->file);
			fput(bprm->file);
			bprm->file = NULL;

			loader = bprm->vma->vm_end - sizeof(void *);

			file = open_exec("/sbin/loader");
			retval = PTR_ERR(file);
			if (IS_ERR(file))
				return retval;

			/* Remember if the application is TASO. */
			bprm->sh_bang = eh->ah.entry < 0x100000000UL;

			bprm->file = file;
			bprm->loader = loader;
			retval = prepare_binprm(bprm);
			if (retval < 0)
				return retval;
			/* should call search_binary_handler recursively here,
			   but it does not matter */
		}
	}
#endif
	retval = security_bprm_check(bprm);
	if (retval)
		return retval;
	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);
	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			if (retval >= 0) {
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_KMOD
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);
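/*
 * The retry loop above runs at most twice: when no handler recognizes the
 * image on the first pass and CONFIG_KMOD is set, request_module() may load
 * a "binfmt-XXXX" module keyed on bytes 2 and 3 of the header, after which
 * the handler list is scanned once more.
 */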
/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	unsigned long env_p;
	int retval;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_ret;

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_kfree;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out_mm;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out_mm;

	retval = security_bprm_alloc(bprm);
	if (retval)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	env_p = bprm->p;
	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;
	bprm->argv_len = env_p - bprm->p;

	retval = search_binary_handler(bprm, regs);
	if (retval >= 0) {
		/* execve success */
		free_arg_pages(bprm);
		security_bprm_free(bprm);
		acct_update_integrals(current);
		kfree(bprm);
		return retval;
	}

out:
	free_arg_pages(bprm);
	if (bprm->security)
		security_bprm_free(bprm);

out_mm:
	if (bprm->mm)
		mmput(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}
out_kfree:
	kfree(bprm);

out_ret:
	return retval;
}
int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);
/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(char *corename, const char *pattern, long signr)
{
	const char *pat_ptr = pattern;
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;
	int ispipe = 0;

	if (*pattern == '|')
		ispipe = 1;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->tgid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", current->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", utsname()->nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* core limit size */
			case 'c':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", current->signal->rlim[RLIMIT_CORE].rlim_cur);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern
	    && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", current->tgid);
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
	return ispipe;
}
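/*
 * format_corename() returns 1 when core_pattern names a pipe helper (the
 * pattern begins with '|') and 0 when corename is a plain file name.
 */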
static void zap_process(struct task_struct *start)
{
	struct task_struct *t;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		if (t != current && t->mm) {
			t->mm->core_waiters++;
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
		}
	} while ((t = next_thread(t)) != start);
}
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int err = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
		tsk->signal->group_exit_code = exit_code;
		zap_process(tsk);
		err = 0;
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (err)
		return err;

	if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
		goto done;

	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;

		p = g;
		do {
			if (p->mm) {
				if (p->mm == mm) {
					/*
					 * p->sighand can't disappear, but
					 * may be changed by de_thread()
					 */
					lock_task_sighand(p, &flags);
					zap_process(p);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while ((p = next_thread(p)) != g);
	}
	rcu_read_unlock();
done:
	return mm->core_waiters;
}
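/*
 * zap_threads() returns the number of threads that still have to release
 * the mm (mm->core_waiters), or -EAGAIN when another group exit or core
 * dump won the race for SIGNAL_GROUP_EXIT.
 */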
static int coredump_wait(int exit_code)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct completion startup_done;
	struct completion *vfork_done;
	int core_waiters;

	init_completion(&mm->core_done);
	init_completion(&startup_done);
	mm->core_startup_done = &startup_done;

	core_waiters = zap_threads(tsk, mm, exit_code);
	up_write(&mm->mmap_sem);

	if (unlikely(core_waiters < 0))
		goto fail;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	vfork_done = tsk->vfork_done;
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	if (core_waiters)
		wait_for_completion(&startup_done);
fail:
	BUG_ON(mm->core_waiters);
	return core_waiters;
}
/*
 * set_dumpable converts the traditional three-value dumpable setting to two
 * flag bits and stores them in mm->flags. It modifies the lower two bits of
 * mm->flags, but these bits are not changed atomically, so get_dumpable can
 * observe an intermediate state. To keep behavior sane, get_dumpable must
 * return either the old value or the new one; this is guaranteed by the
 * order in which the bits are modified:
 *
 *   dumpable |   mm->flags (binary)
 *   old  new | initial interim final
 *   ---------+-----------------------
 *    0    1  |   00      01      01
 *    0    2  |   00      10(*)   11
 *    1    0  |   01      00      00
 *    1    2  |   01      11      11
 *    2    0  |   11      10(*)   00
 *    2    1  |   11      11      01
 *
 * (*) get_dumpable regards the interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	switch (value) {
	case 0:
		clear_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 1:
		set_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 2:
		set_bit(MMF_DUMP_SECURELY, &mm->flags);
		smp_wmb();
		set_bit(MMF_DUMPABLE, &mm->flags);
		break;
	}
}
EXPORT_SYMBOL_GPL(set_dumpable);
int get_dumpable(struct mm_struct *mm)
{
	int ret;

	ret = mm->flags & 0x3;
	return (ret >= 2) ? 2 : ret;
}
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
{
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	int retval = 0;
	int fsuid = current->fsuid;
	int flag = 0;
	int ispipe = 0;
	unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	char **helper_argv = NULL;
	int helper_argc = 0;
	char *delimit;

	audit_core_dumps(signr);

	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	down_write(&mm->mmap_sem);
	if (!get_dumpable(mm)) {
		up_write(&mm->mmap_sem);
		goto fail;
	}

	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (get_dumpable(mm) == 2) {	/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		current->fsuid = 0;	/* Dump root private */
	}
	set_dumpable(mm, 0);

	retval = coredump_wait(exit_code);
	if (retval < 0)
		goto fail;

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl,
	 * which uses lock_kernel()
	 */
	lock_kernel();
	ispipe = format_corename(corename, core_pattern, signr);
	unlock_kernel();
	/*
	 * Don't bother to check the RLIMIT_CORE value if core_pattern points
	 * to a pipe. Since we're not writing directly to the filesystem,
	 * RLIMIT_CORE doesn't really apply: no actual core file will be
	 * created unless the pipe reader chooses to write one out, at which
	 * point file size limits and permissions will be imposed as they
	 * are for any other process.
	 */
	if ((!ispipe) && (core_limit < binfmt->min_coredump))
		goto fail_unlock;
	if (ispipe) {
		helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
		/* Terminate the string before the first option */
		delimit = strchr(corename, ' ');
		if (delimit)
			*delimit = '\0';
		delimit = strrchr(helper_argv[0], '/');
		if (delimit)
			delimit++;
		else
			delimit = helper_argv[0];
		if (!strcmp(delimit, current->comm)) {
			printk(KERN_NOTICE "Recursive core dump detected, "
					"aborting\n");
			goto fail_unlock;
		}

		core_limit = RLIM_INFINITY;

		/* SIGPIPE can happen, but it's just never processed */
		if (call_usermodehelper_pipe(corename+1, helper_argv, NULL,
				&file)) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       corename);
			goto fail_unlock;
		}
	} else
		file = filp_open(corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);	/* the bare 2 is O_RDWR */
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_path.dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (!ispipe && d_unhashed(file->f_path.dentry))
		goto close_fail;
	/* AK: actually I see no reason to not allow this for named pipes etc.,
	   but keep the previous behaviour for now. */
	if (!ispipe && !S_ISREG(inode->i_mode))
		goto close_fail;
	if (!file->f_op)
		goto close_fail;
	if (!file->f_op->write)
		goto close_fail;
	if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
		goto close_fail;

	retval = binfmt->core_dump(signr, regs, file, core_limit);

	if (retval)
		current->signal->group_exit_code |= 0x80;
close_fail:
	filp_close(file, NULL);
fail_unlock:
	if (helper_argv)
		argv_free(helper_argv);

	current->fsuid = fsuid;
	complete_all(&mm->core_done);
fail:
	return retval;
}