garbage.c

/*
 * NET3:        Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *        Copyright (C) Barak A. Pearlmutter.
 *        Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *        Alan Cox        07 Sept 1997        Vmalloc internal stack as needed.
 *                                            Cope with changing max_files.
 *        Al Viro         11 Oct 1998
 *                Graph may have cycles. That is, we can send the descriptor
 *                of foo to bar and vice versa. Current code chokes on that.
 *                Fix: move SCM_RIGHTS ones into the separate list and then
 *                skb_free() them all instead of doing explicit fput's.
 *                Another problem: since fput() may block, somebody may
 *                create a new unix_socket while we are in the middle of the
 *                sweep phase. Fix: revert the logic wrt MARKED. Mark
 *                everything upon the beginning and unmark non-junk ones.
 *
 *                [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *                sent to connect()'ed but still not accept()'ed sockets.
 *                Fixed. Old code had a slightly different problem here:
 *                an extra fput() when we passed the descriptor via such a
 *                socket and then closed it (the descriptor). That would
 *                happen on each unix_gc() until the accept(). Since the
 *                struct file in question would go to the free list and
 *                might be reused... That might be the reason for random
 *                oopses on filp_close() in unrelated processes.
 *
 *        AV              28 Feb 1999
 *                Kill the explicit allocation of stack. Now we keep the tree
 *                with root in dummy + pointer (gc_current) to one of the nodes.
 *                Stack is represented as path from gc_current to dummy. Unmark
 *                now means "add to tree". Push == "make it a son of gc_current".
 *                Pop == "move gc_current to parent". We keep only pointers to
 *                parents (->gc_tree).
 *        AV              1 Mar 1999
 *                Damn. Added missing check for ->dead in listen queues scanning.
 *
 *        Miklos Szeredi  25 Jun 2007
 *                Reimplement with a cycle-collecting algorithm. This should
 *                solve several problems with the previous code, like being
 *                racy wrt receive and holding up unrelated socket operations.
 */
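
/*
 * Illustration (a sketch, not part of the kernel sources): the garbage
 * this collector reclaims can be created entirely from user space by
 * two sockets exchanging each other's descriptor and then being
 * closed, roughly:
 *
 *        int sv[2];
 *        socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *        ... send sv[1] over sv[0] and sv[0] over sv[1] via SCM_RIGHTS ...
 *        close(sv[0]);
 *        close(sv[1]);
 *
 * Each socket now holds the last reference to the other through an skb
 * queued on its receive queue; no process can reach either of them, so
 * only the in-flight accounting below can reclaim them.
 */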

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;
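
/*
 * Return the AF_UNIX sock behind a file, or NULL if the file does not
 * refer to a PF_UNIX socket.
 */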
static struct sock *unix_get_socket(struct file *filp)
{
        struct sock *u_sock = NULL;
        struct inode *inode = filp->f_path.dentry->d_inode;

        /*
         * Socket ?
         */
        if (S_ISSOCK(inode->i_mode)) {
                struct socket *sock = SOCKET_I(inode);
                struct sock *s = sock->sk;

                /*
                 * PF_UNIX ?
                 */
                if (s && sock->ops && sock->ops->family == PF_UNIX)
                        u_sock = s;
        }
        return u_sock;
}

/*
 * Keep the in-flight count for a file descriptor up to date if it
 * refers to an AF_UNIX socket.
 */
void unix_inflight(struct file *fp)
{
        struct sock *s = unix_get_socket(fp);

        if (s) {
                struct unix_sock *u = unix_sk(s);

                spin_lock(&unix_gc_lock);

                if (atomic_long_inc_return(&u->inflight) == 1) {
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &gc_inflight_list);
                } else {
                        BUG_ON(list_empty(&u->link));
                }
                unix_tot_inflight++;

                spin_unlock(&unix_gc_lock);
        }
}

void unix_notinflight(struct file *fp)
{
        struct sock *s = unix_get_socket(fp);

        if (s) {
                struct unix_sock *u = unix_sk(s);

                spin_lock(&unix_gc_lock);

                BUG_ON(list_empty(&u->link));
                if (atomic_long_dec_and_test(&u->inflight))
                        list_del_init(&u->link);
                unix_tot_inflight--;

                spin_unlock(&unix_gc_lock);
        }
}
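
/*
 * Note on the callers (an expectation, not enforced here): the AF_UNIX
 * file-passing code should call unix_inflight() once for every
 * descriptor attached to an SCM_RIGHTS skb, and unix_notinflight()
 * once when that descriptor is received or the skb is destroyed, so
 * that u->inflight mirrors the number of queued references.
 */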

static inline struct sk_buff *sock_queue_head(struct sock *sk)
{
        return (struct sk_buff *)&sk->sk_receive_queue;
}

#define receive_queue_for_each_skb(sk, next, skb)                        \
        for (skb = sock_queue_head(sk)->next, next = skb->next;         \
             skb != sock_queue_head(sk); skb = next, next = skb->next)
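
/*
 * Walk the receive queue of 'x' and apply 'func' to the unix socket
 * behind every in-flight descriptor that refers to a GC candidate.
 * When 'hitlist' is non-NULL, any skb whose descriptors hit a
 * candidate is moved onto it, to be purged once the locks are dropped.
 */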
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        struct sk_buff *skb;
        struct sk_buff *next;

        spin_lock(&x->sk_receive_queue.lock);
        receive_queue_for_each_skb(x, next, skb) {
                /*
                 * Do we have file descriptors ?
                 */
                if (UNIXCB(skb).fp) {
                        bool hit = false;
                        /*
                         * Process the descriptors of this socket
                         */
                        int nfd = UNIXCB(skb).fp->count;
                        struct file **fp = UNIXCB(skb).fp->fp;

                        while (nfd--) {
                                /*
                                 * Get the socket the fd matches
                                 * if it indeed does so
                                 */
                                struct sock *sk = unix_get_socket(*fp++);

                                if (sk) {
                                        struct unix_sock *u = unix_sk(sk);

                                        /*
                                         * Ignore non-candidates, they could
                                         * have been added to the queues after
                                         * starting the garbage collection
                                         */
                                        if (u->gc_candidate) {
                                                hit = true;
                                                func(u);
                                        }
                                }
                        }
                        if (hit && hitlist != NULL) {
                                __skb_unlink(skb, &x->sk_receive_queue);
                                __skb_queue_tail(hitlist, skb);
                        }
                }
        }
        spin_unlock(&x->sk_receive_queue.lock);
}
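
/*
 * Apply scan_inflight() to 'x' or, when 'x' is a listening socket, to
 * each of its embryos (sockets that are connect()'ed but not yet
 * accept()'ed), since any in-flight descriptors queued so far live on
 * the embryos' receive queues.
 */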
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        if (x->sk_state != TCP_LISTEN)
                scan_inflight(x, func, hitlist);
        else {
                struct sk_buff *skb;
                struct sk_buff *next;
                struct unix_sock *u;
                LIST_HEAD(embryos);

                /*
                 * For a listening socket collect the queued embryos
                 * and perform a scan on them as well.
                 */
                spin_lock(&x->sk_receive_queue.lock);
                receive_queue_for_each_skb(x, next, skb) {
                        u = unix_sk(skb->sk);

                        /*
                         * An embryo cannot be in-flight, so it's safe
                         * to use the list link.
                         */
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &embryos);
                }
                spin_unlock(&x->sk_receive_queue.lock);

                while (!list_empty(&embryos)) {
                        u = list_entry(embryos.next, struct unix_sock, link);
                        scan_inflight(&u->sk, func, hitlist);
                        list_del_init(&u->link);
                }
        }
}
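
/*
 * Per-socket counter callbacks handed to scan_children() by the
 * phases of unix_gc() below.
 */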
static void dec_inflight(struct unix_sock *usk)
{
        atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
        atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
        atomic_long_inc(&u->inflight);
        /*
         * If this still might be part of a cycle, move it to the end
         * of the list, so that it's checked even if it was already
         * passed over
         */
        if (u->gc_maybe_cycle)
                list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress = false;
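
/* Block until any garbage collection pass in progress has finished. */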
void wait_for_unix_gc(void)
{
        wait_event(unix_gc_wait, gc_in_progress == false);
}

/* The external entry point: unix_gc() */
void unix_gc(void)
{
        struct unix_sock *u;
        struct unix_sock *next;
        struct sk_buff_head hitlist;
        struct list_head cursor;
        LIST_HEAD(not_cycle_list);

        spin_lock(&unix_gc_lock);

        /* Avoid a recursive GC. */
        if (gc_in_progress)
                goto out;

        gc_in_progress = true;
        /*
         * First, select candidates for garbage collection. Only
         * in-flight sockets are considered, and from those only the
         * ones which don't have any external reference.
         *
         * Holding unix_gc_lock will protect these candidates from
         * being detached, and hence from gaining an external
         * reference. Since there are no possible receivers, all
         * buffers currently on the candidates' queues stay there
         * during the garbage collection.
         *
         * We also know that no new candidate can be added onto the
         * receive queues. Other, non-candidate sockets _can_ be
         * added to the queues, so we must make sure to touch only
         * candidates.
         */
        list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
                long total_refs;
                long inflight_refs;

                total_refs = file_count(u->sk.sk_socket->file);
                inflight_refs = atomic_long_read(&u->inflight);

                BUG_ON(inflight_refs < 1);
                BUG_ON(total_refs < inflight_refs);
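
                /*
                 * If all of the file's references are accounted for by
                 * in-flight counts, user space no longer holds this
                 * socket open; it can only be kept alive by other
                 * in-flight sockets, making it a GC candidate.
                 */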
                if (total_refs == inflight_refs) {
                        list_move_tail(&u->link, &gc_candidates);
                        u->gc_candidate = 1;
                        u->gc_maybe_cycle = 1;
                }
        }

        /*
         * Now remove all internal in-flight references to children of
         * the candidates.
         */
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, dec_inflight, NULL);

        /*
         * Restore the references for children of all candidates which
         * still have remaining references. Do this recursively, so that
         * only those which form cyclic references remain.
         *
         * Use a "cursor" link to make the list traversal safe, even
         * though elements might be moved about.
         */
        list_add(&cursor, &gc_candidates);
        while (cursor.next != &gc_candidates) {
                u = list_entry(cursor.next, struct unix_sock, link);

                /* Move cursor to after the current position. */
                list_move(&cursor, &u->link);

                if (atomic_long_read(&u->inflight) > 0) {
                        list_move_tail(&u->link, &not_cycle_list);
                        u->gc_maybe_cycle = 0;
                        scan_children(&u->sk, inc_inflight_move_tail, NULL);
                }
        }
        list_del(&cursor);

        /*
         * not_cycle_list contains those sockets which do not make up a
         * cycle. Restore these to the inflight list.
         */
        while (!list_empty(&not_cycle_list)) {
                u = list_entry(not_cycle_list.next, struct unix_sock, link);
                u->gc_candidate = 0;
                list_move_tail(&u->link, &gc_inflight_list);
        }

        /*
         * Now gc_candidates contains only garbage. Restore the original
         * in-flight counters for these as well, and remove the skbuffs
         * which are creating the cycle(s).
         */
        skb_queue_head_init(&hitlist);
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, inc_inflight, &hitlist);

        spin_unlock(&unix_gc_lock);

        /* Here we are. Hitlist is filled. Die. */
        __skb_queue_purge(&hitlist);

        spin_lock(&unix_gc_lock);

        /* All candidates should have been detached by now. */
        BUG_ON(!list_empty(&gc_candidates));
        gc_in_progress = false;

        wake_up(&unix_gc_wait);

out:
        spin_unlock(&unix_gc_lock);
}