/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
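
/*
 * Per-cpu holding pen for fdtables whose vmalloc'ed parts must be
 * freed later from process context (see free_fdtable_rcu() below).
 */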
struct fdtable_defer {
	spinlock_t lock;
	struct work_struct wq;
	struct fdtable *next;
};

int sysctl_nr_open __read_mostly = 1024*1024;

/*
 * We use this list to defer freeing fdtables that have vmalloc'ed
 * sets/arrays. By keeping a per-cpu list we avoid embedding the
 * work_struct in fdtable itself, which would grow this per-task
 * structure by 64 bytes on i386.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);
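
/*
 * Allocate space for an fd array or fdset: requests that fit in a
 * page come from the slab, larger ones fall back to vmalloc().
 */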
static inline void *alloc_fdmem(unsigned int size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_KERNEL);
	else
		return vmalloc(size);
}
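
/*
 * Free the fd array, using the same page-size threshold as
 * alloc_fdmem() to decide between kfree() and vfree().
 */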
static inline void free_fdarr(struct fdtable *fdt)
{
	if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *)))
		kfree(fdt->fd);
	else
		vfree(fdt->fd);
}
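
/*
 * Free the bitmaps. The open_fds allocation holds two bitmaps of
 * max_fds bits each (open_fds and close_on_exec), i.e.
 * 2 * max_fds / BITS_PER_BYTE bytes, so it was kmalloc'ed whenever
 * max_fds <= PAGE_SIZE * BITS_PER_BYTE / 2.
 */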
static inline void free_fdset(struct fdtable *fdt)
{
	if (fdt->max_fds <= (PAGE_SIZE * BITS_PER_BYTE / 2))
		kfree(fdt->open_fds);
	else
		vfree(fdt->open_fds);
}
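
/*
 * Workqueue handler: runs in process context, where vfree() is safe.
 * Any fdtable on the deferred list has max_fds above the kmalloc
 * threshold, so its fd array is always vmalloc'ed; the bitmaps may be
 * either, hence free_fdset().
 */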
static void free_fdtable_work(struct work_struct *work)
{
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	spin_lock_bh(&f->lock);
	fdt = f->next;
	f->next = NULL;
	spin_unlock_bh(&f->lock);
	while (fdt) {
		struct fdtable *next = fdt->next;

		vfree(fdt->fd);
		free_fdset(fdt);
		kfree(fdt);
		fdt = next;
	}
}
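
/*
 * RCU callback, invoked in softirq context once the grace period has
 * elapsed. vfree() may not be called from interrupt context, so any
 * vmalloc'ed parts are handed off to the per-cpu workqueue instead of
 * being freed here.
 */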
void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);

	if (fdt->max_fds <= NR_OPEN_DEFAULT) {
		/*
		 * This fdtable is embedded in the files structure and that
		 * structure itself is getting destroyed.
		 */
		kmem_cache_free(files_cachep,
				container_of(fdt, struct files_struct, fdtab));
		return;
	}
	if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *))) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}

/*
 * Copy the fd array and both fdsets from the old fdtable into the new
 * one, zeroing the newly added slots. Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);
	if (ofdt->max_fds == 0)
		return;

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	char *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
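
	/*
	 * Example, on a 64-bit machine (sizeof(struct file *) == 8,
	 * so 128 slots per 1024B step): a request for nr = 300 gives
	 * 300 / 128 = 2, roundup_pow_of_two(3) = 4, 4 * 128 = 512,
	 * i.e. a 512-slot table backed by a 4096-byte fd array.
	 */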

	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here. Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = (struct file **)data;
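
	/*
	 * Both bitmaps live in a single allocation: the first nr bits are
	 * open_fds, the next nr bits are close_on_exec. The max_t() keeps
	 * the allocation at least one cache line in size.
	 */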
	data = alloc_fdmem(max_t(unsigned int,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = (fd_set *)data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = (fd_set *)data;
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->next = NULL;

	return fdt;

out_arr:
	free_fdarr(fdt);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;
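
	/*
	 * Drop the lock across the allocation: alloc_fdtable() may block,
	 * and sleeping with a spinlock held is not allowed.
	 */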
	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable(). Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		free_fdarr(new_fdt);
		free_fdset(new_fdt);
		kfree(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
			free_fdtable(cur_fdt);
	} else {
		/* Somebody else expanded, so undo our attempt */
		free_fdarr(new_fdt);
		free_fdset(new_fdt);
		kfree(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);
	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;
	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}
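
/* Set up one CPU's deferral list: lock, work item, empty list head. */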
static void __devinit fdtable_defer_list_init(int cpu)
{
	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);

	spin_lock_init(&fddef->lock);
	INIT_WORK(&fddef->wq, free_fdtable_work);
	fddef->next = NULL;
}
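
/* Called once at boot to initialize the deferral list on every CPU. */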
void __init files_defer_init(void)
{
	int i;

	for_each_possible_cpu(i)
		fdtable_defer_list_init(i);
}