yama_lsm.c

/*
 * Yama Linux Security Module
 *
 * Author: Kees Cook <keescook@chromium.org>
 *
 * Copyright (C) 2010 Canonical, Ltd.
 * Copyright (C) 2011 The Chromium OS Authors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 *
 */

#include <linux/security.h>
#include <linux/sysctl.h>
#include <linux/ptrace.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
/* The four enforcement scopes, from least to most restrictive. */
#define YAMA_SCOPE_DISABLED	0	/* classic ptrace permissions only */
#define YAMA_SCOPE_RELATIONAL	1	/* descendants or declared ptracers */
#define YAMA_SCOPE_CAPABILITY	2	/* attach requires CAP_SYS_PTRACE */
#define YAMA_SCOPE_NO_ATTACH	3	/* PTRACE_ATTACH is never allowed */

static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
/* describe a ptrace relationship for potential exception */
struct ptrace_relation {
	struct task_struct *tracer;
	struct task_struct *tracee;
	bool invalid;
	struct list_head node;
	struct rcu_head rcu;
};

static LIST_HEAD(ptracer_relations);
static DEFINE_SPINLOCK(ptracer_relations_lock);

static void yama_relation_cleanup(struct work_struct *work);
static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
/**
 * yama_relation_cleanup - remove invalid entries from the relation list
 * @work: unused
 */
static void yama_relation_cleanup(struct work_struct *work)
{
	struct ptrace_relation *relation;

	spin_lock(&ptracer_relations_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->invalid) {
			list_del_rcu(&relation->node);
			kfree_rcu(relation, rcu);
		}
	}
	rcu_read_unlock();
	spin_unlock(&ptracer_relations_lock);
}
/**
 * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
 * @tracer: the task_struct of the process doing the ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Each tracee can have, at most, one tracer registered. Each time this
 * is called, the prior registered tracer will be replaced for the tracee.
 *
 * Returns 0 if relationship was added, -ve on error.
 */
static int yama_ptracer_add(struct task_struct *tracer,
			    struct task_struct *tracee)
{
	struct ptrace_relation *relation, *added;

	added = kmalloc(sizeof(*added), GFP_KERNEL);
	if (!added)
		return -ENOMEM;

	added->tracee = tracee;
	added->tracer = tracer;
	added->invalid = false;

	spin_lock(&ptracer_relations_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->invalid)
			continue;
		if (relation->tracee == tracee) {
			list_replace_rcu(&relation->node, &added->node);
			kfree_rcu(relation, rcu);
			goto out;
		}
	}

	list_add_rcu(&added->node, &ptracer_relations);

out:
	rcu_read_unlock();
	spin_unlock(&ptracer_relations_lock);
	return 0;
}
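
/*
 * Note on the pattern above: readers walk ptracer_relations under
 * rcu_read_lock() alone, while writers serialize on
 * ptracer_relations_lock. list_replace_rcu() plus kfree_rcu() swaps an
 * entry without waiting out concurrent readers, which observe either
 * the old node or its replacement, never neither.
 */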
/**
 * yama_ptracer_del - remove exceptions related to the given tasks
 * @tracer: remove any relation where tracer task matches
 * @tracee: remove any relation where tracee task matches
 */
static void yama_ptracer_del(struct task_struct *tracer,
			     struct task_struct *tracee)
{
	struct ptrace_relation *relation;
	bool marked = false;

	/* Only mark entries here; the actual unlink and free happen in
	 * yama_relation_cleanup() via the workqueue, which keeps this
	 * path free of the spinlock.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->invalid)
			continue;
		if (relation->tracee == tracee ||
		    (tracer && relation->tracer == tracer)) {
			relation->invalid = true;
			marked = true;
		}
	}
	rcu_read_unlock();

	if (marked)
		schedule_work(&yama_relation_work);
}
/**
 * yama_task_free - check for task_pid to remove from exception list
 * @task: task being removed
 */
void yama_task_free(struct task_struct *task)
{
	yama_ptracer_del(task, task);
}
/**
 * yama_task_prctl - check for Yama-specific prctl operations
 * @option: operation
 * @arg2: argument
 * @arg3: argument
 * @arg4: argument
 * @arg5: argument
 *
 * Return 0 on success, -ve on error. -ENOSYS is returned when Yama
 * does not handle the given option.
 */
int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
		    unsigned long arg4, unsigned long arg5)
{
	int rc;
	struct task_struct *myself = current;

	rc = cap_task_prctl(option, arg2, arg3, arg4, arg5);
	if (rc != -ENOSYS)
		return rc;

	switch (option) {
	case PR_SET_PTRACER:
		/* Since a thread can call prctl(), find the group leader
		 * before calling _add() or _del() on it, since we want
		 * process-level granularity of control. The tracer group
		 * leader checking is handled later when walking the ancestry
		 * at the time of PTRACE_ATTACH check.
		 */
		rcu_read_lock();
		if (!thread_group_leader(myself))
			myself = rcu_dereference(myself->group_leader);
		get_task_struct(myself);
		rcu_read_unlock();

		if (arg2 == 0) {
			yama_ptracer_del(NULL, myself);
			rc = 0;
		} else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
			rc = yama_ptracer_add(NULL, myself);
		} else {
			struct task_struct *tracer;

			rcu_read_lock();
			tracer = find_task_by_vpid(arg2);
			if (tracer)
				get_task_struct(tracer);
			else
				rc = -EINVAL;
			rcu_read_unlock();

			if (tracer) {
				rc = yama_ptracer_add(tracer, myself);
				put_task_struct(tracer);
			}
		}

		put_task_struct(myself);
		break;
	}

	return rc;
}
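
/*
 * Userspace usage sketch (illustrative, not part of this module): a
 * process that expects an out-of-tree helper to attach can declare it
 * up front. 'crash_handler_pid' is a hypothetical variable naming the
 * intended tracer.
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_PTRACER, crash_handler_pid, 0, 0, 0);
 *	prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0); // allow any tracer
 *	prctl(PR_SET_PTRACER, 0, 0, 0, 0);                  // clear the exception
 */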
/**
 * task_is_descendant - walk up a process family tree looking for a match
 * @parent: the process to compare against while walking up from child
 * @child: the process to start from while looking upwards for parent
 *
 * Returns 1 if child is a descendant of parent, 0 if not.
 */
static int task_is_descendant(struct task_struct *parent,
			      struct task_struct *child)
{
	int rc = 0;
	struct task_struct *walker = child;

	if (!parent || !child)
		return 0;

	rcu_read_lock();
	if (!thread_group_leader(parent))
		parent = rcu_dereference(parent->group_leader);
	while (walker->pid > 0) {
		if (!thread_group_leader(walker))
			walker = rcu_dereference(walker->group_leader);
		if (walker == parent) {
			rc = 1;
			break;
		}
		walker = rcu_dereference(walker->real_parent);
	}
	rcu_read_unlock();

	return rc;
}
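
/*
 * Worked example (hypothetical pids): given the ancestry chain
 * bash(100) -> gdb(200) -> inferior(300), both
 * task_is_descendant(gdb, inferior) and task_is_descendant(bash,
 * inferior) return 1, while task_is_descendant(inferior, gdb) returns
 * 0: the walk only follows real_parent upward from @child.
 */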
/**
 * ptracer_exception_found - tracer registered as exception for this tracee
 * @tracer: the task_struct of the process attempting ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Returns 1 if the tracee has registered a ptracer exception that covers
 * tracer: either the registered tracer itself, one of its descendants,
 * or any process when PR_SET_PTRACER_ANY was used.
 */
static int ptracer_exception_found(struct task_struct *tracer,
				   struct task_struct *tracee)
{
	int rc = 0;
	struct ptrace_relation *relation;
	struct task_struct *parent = NULL;
	bool found = false;

	rcu_read_lock();
	if (!thread_group_leader(tracee))
		tracee = rcu_dereference(tracee->group_leader);
	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
		if (relation->invalid)
			continue;
		if (relation->tracee == tracee) {
			parent = relation->tracer;
			found = true;
			break;
		}
	}

	/* A NULL tracer in the relation means PR_SET_PTRACER_ANY. */
	if (found && (parent == NULL || task_is_descendant(parent, tracer)))
		rc = 1;
	rcu_read_unlock();

	return rc;
}
/**
 * yama_ptrace_access_check - validate PTRACE_ATTACH calls
 * @child: task that current task is attempting to ptrace
 * @mode: ptrace attach mode
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
int yama_ptrace_access_check(struct task_struct *child,
			     unsigned int mode)
{
	int rc;

	/* If standard caps disallows it, so does Yama. We should
	 * only tighten restrictions further.
	 */
	rc = cap_ptrace_access_check(child, mode);
	if (rc)
		return rc;

	/* require ptrace target be a child of ptracer on attach */
	if (mode == PTRACE_MODE_ATTACH) {
		switch (ptrace_scope) {
		case YAMA_SCOPE_DISABLED:
			/* No additional restrictions. */
			break;
		case YAMA_SCOPE_RELATIONAL:
			rcu_read_lock();
			if (!task_is_descendant(current, child) &&
			    !ptracer_exception_found(current, child) &&
			    !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
				rc = -EPERM;
			rcu_read_unlock();
			break;
		case YAMA_SCOPE_CAPABILITY:
			rcu_read_lock();
			if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
				rc = -EPERM;
			rcu_read_unlock();
			break;
		case YAMA_SCOPE_NO_ATTACH:
		default:
			rc = -EPERM;
			break;
		}
	}

	if (rc) {
		printk_ratelimited(KERN_NOTICE
			"ptrace of pid %d was attempted by: %s (pid %d)\n",
			child->pid, current->comm, current->pid);
	}

	return rc;
}
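
/*
 * Observable effect (illustrative): under YAMA_SCOPE_RELATIONAL, an
 * unprivileged attach to an unrelated process fails and logs the
 * rate-limited notice above. 'victim_pid' is a hypothetical pid.
 *
 *	#include <sys/ptrace.h>
 *
 *	if (ptrace(PTRACE_ATTACH, victim_pid, NULL, NULL) < 0)
 *		perror("ptrace");	// EPERM: Operation not permitted
 */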
/**
 * yama_ptrace_traceme - validate PTRACE_TRACEME calls
 * @parent: task that will become the ptracer of the current task
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
int yama_ptrace_traceme(struct task_struct *parent)
{
	int rc;

	/* If standard caps disallows it, so does Yama. We should
	 * only tighten restrictions further.
	 */
	rc = cap_ptrace_traceme(parent);
	if (rc)
		return rc;

	/* Only disallow PTRACE_TRACEME on more aggressive settings. */
	switch (ptrace_scope) {
	case YAMA_SCOPE_CAPABILITY:
		if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
			rc = -EPERM;
		break;
	case YAMA_SCOPE_NO_ATTACH:
		rc = -EPERM;
		break;
	}

	if (rc) {
		printk_ratelimited(KERN_NOTICE
			"ptraceme of pid %d was attempted by: %s (pid %d)\n",
			current->pid, parent->comm, parent->pid);
	}

	return rc;
}
#ifndef CONFIG_SECURITY_YAMA_STACKED
static struct security_operations yama_ops = {
	.name = "yama",

	.ptrace_access_check = yama_ptrace_access_check,
	.ptrace_traceme = yama_ptrace_traceme,
	.task_prctl = yama_task_prctl,
	.task_free = yama_task_free,
};
#endif
#ifdef CONFIG_SYSCTL
static int yama_dointvec_minmax(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc;

	if (write && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (rc)
		return rc;

	/* Lock the max value if it ever gets set. */
	if (write && *(int *)table->data == *(int *)table->extra2)
		table->extra1 = table->extra2;

	return rc;
}

static int zero;
static int max_scope = YAMA_SCOPE_NO_ATTACH;

struct ctl_path yama_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "yama", },
	{ }
};

static struct ctl_table yama_sysctl_table[] = {
	{
		.procname = "ptrace_scope",
		.data = &ptrace_scope,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = yama_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &max_scope,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
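
/*
 * Administration sketch (shell, illustrative): the handler above lets
 * the scope be raised at runtime, but once written to its maximum it
 * cannot be lowered again until reboot, because extra1 (the minimum)
 * is locked to extra2 (the maximum).
 *
 *	# cat /proc/sys/kernel/yama/ptrace_scope
 *	1
 *	# echo 2 > /proc/sys/kernel/yama/ptrace_scope	# tighten
 *	# echo 3 > /proc/sys/kernel/yama/ptrace_scope	# locks at 3
 */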
static __init int yama_init(void)
{
#ifndef CONFIG_SECURITY_YAMA_STACKED
	if (!security_module_enable(&yama_ops))
		return 0;
#endif

	printk(KERN_INFO "Yama: becoming mindful.\n");

#ifndef CONFIG_SECURITY_YAMA_STACKED
	if (register_security(&yama_ops))
		panic("Yama: kernel registration failed.\n");
#endif

#ifdef CONFIG_SYSCTL
	if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
		panic("Yama: sysctl registration failed.\n");
#endif

	return 0;
}

security_initcall(yama_init);