generic.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744
  1. /*
  2. * proc/fs/generic.c --- generic routines for the proc-fs
  3. *
  4. * This file contains generic proc-fs routines for handling
  5. * directories and files.
  6. *
  7. * Copyright (C) 1991, 1992 Linus Torvalds.
  8. * Copyright (C) 1997 Theodore Ts'o
  9. */
  10. #include <linux/errno.h>
  11. #include <linux/time.h>
  12. #include <linux/proc_fs.h>
  13. #include <linux/stat.h>
  14. #include <linux/module.h>
  15. #include <linux/mount.h>
  16. #include <linux/smp_lock.h>
  17. #include <linux/init.h>
  18. #include <linux/idr.h>
  19. #include <linux/namei.h>
  20. #include <linux/bitops.h>
  21. #include <linux/spinlock.h>
  22. #include <linux/completion.h>
  23. #include <asm/uaccess.h>
  24. #include "internal.h"
/* File operation handlers for legacy callback-based /proc files. */
static ssize_t proc_file_read(struct file *file, char __user *buf,
			      size_t nbytes, loff_t *ppos);
static ssize_t proc_file_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *ppos);
static loff_t proc_file_lseek(struct file *, loff_t, int);

/* Protects the proc_dir_entry tree: every subdir/next traversal and
 * modification in this file happens under this lock. */
DEFINE_SPINLOCK(proc_subdir_lock);
  31. static int proc_match(int len, const char *name, struct proc_dir_entry *de)
  32. {
  33. if (de->namelen != len)
  34. return 0;
  35. return !memcmp(name, de->name, len);
  36. }
/* Default file operations installed on regular /proc files that did not
 * supply their own (see proc_register()). */
static const struct file_operations proc_file_operations = {
	.llseek		= proc_file_lseek,
	.read		= proc_file_read,
	.write		= proc_file_write,
};

/* buffer size is one page but our output routines use some slack for overruns */
#define PROC_BLOCK_SIZE	(PAGE_SIZE - 1024)
/*
 * Read handler for legacy /proc entries that publish data via the
 * get_info() or read_proc() callback in their proc_dir_entry.  The
 * callback is invoked repeatedly, for up to PROC_BLOCK_SIZE bytes per
 * round, until @nbytes have been delivered, the callback signals EOF,
 * or an error occurs.  Returns the number of bytes copied to @buf,
 * 0 at EOF, or a negative errno.
 */
static ssize_t
proc_file_read(struct file *file, char __user *buf, size_t nbytes,
	       loff_t *ppos)
{
	struct inode * inode = file->f_path.dentry->d_inode;
	char	*page;
	ssize_t	retval = 0;
	int	eof = 0;
	ssize_t	n, count;
	char	*start;
	struct proc_dir_entry * dp;
	unsigned long long pos;

	/*
	 * Gaah, please just use "seq_file" instead. The legacy /proc
	 * interfaces cut loff_t down to off_t for reads, and ignore
	 * the offset entirely for writes..
	 */
	pos = *ppos;
	if (pos > MAX_NON_LFS)
		return 0;
	if (nbytes > MAX_NON_LFS - pos)
		nbytes = MAX_NON_LFS - pos;

	dp = PDE(inode);
	/* One page of scratch space for the callback to fill. */
	if (!(page = (char *) __get_free_page(GFP_TEMPORARY)))
		return -ENOMEM;

	while ((nbytes > 0) && !eof) {
		count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);

		start = NULL;
		if (dp->get_info) {
			/* Handle old net routines */
			n = dp->get_info(page, &start, *ppos, count);
			if (n < count)
				eof = 1;
		} else if (dp->read_proc) {
			/*
			 * How to be a proc read function
			 * ------------------------------
			 * Prototype:
			 *    int f(char *buffer, char **start, off_t offset,
			 *          int count, int *peof, void *dat)
			 *
			 * Assume that the buffer is "count" bytes in size.
			 *
			 * If you know you have supplied all the data you
			 * have, set *peof.
			 *
			 * You have three ways to return data:
			 * 0) Leave *start = NULL.  (This is the default.)
			 *    Put the data of the requested offset at that
			 *    offset within the buffer.  Return the number (n)
			 *    of bytes there are from the beginning of the
			 *    buffer up to the last byte of data.  If the
			 *    number of supplied bytes (= n - offset) is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by the number of bytes
			 *    absorbed.  This interface is useful for files
			 *    no larger than the buffer.
			 * 1) Set *start = an unsigned long value less than
			 *    the buffer address but greater than zero.
			 *    Put the data of the requested offset at the
			 *    beginning of the buffer.  Return the number of
			 *    bytes of data placed there.  If this number is
			 *    greater than zero and you didn't signal eof
			 *    and the reader is prepared to take more data
			 *    you will be called again with the requested
			 *    offset advanced by *start.  This interface is
			 *    useful when you have a large file consisting
			 *    of a series of blocks which you want to count
			 *    and return as wholes.
			 *    (Hack by Paul.Russell@rustcorp.com.au)
			 * 2) Set *start = an address within the buffer.
			 *    Put the data of the requested offset at *start.
			 *    Return the number of bytes of data placed there.
			 *    If this number is greater than zero and you
			 *    didn't signal eof and the reader is prepared to
			 *    take more data you will be called again with the
			 *    requested offset advanced by the number of bytes
			 *    absorbed.
			 */
			n = dp->read_proc(page, &start, *ppos,
					  count, &eof, dp->data);
		} else
			break;

		if (n == 0)   /* end of file */
			break;
		if (n < 0) {  /* error */
			if (retval == 0)
				retval = n;
			break;
		}

		if (start == NULL) {
			/* Interface 0: data sits at its file offset inside page. */
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			n -= *ppos;
			if (n <= 0)
				break;
			if (n > count)
				n = count;
			start = page + *ppos;
		} else if (start < page) {
			/* Interface 1: data at page start; *start encodes the
			 * offset advance, so don't clip n to count. */
			if (n > PAGE_SIZE) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE;
			}
			if (n > count) {
				/*
				 * Don't reduce n because doing so might
				 * cut off part of a data block.
				 */
				printk(KERN_WARNING
				       "proc_file_read: Read count exceeded\n");
			}
		} else /* start >= page */ {
			/* Interface 2: data begins at *start within page. */
			unsigned long startoff = (unsigned long)(start - page);
			if (n > (PAGE_SIZE - startoff)) {
				printk(KERN_ERR
				       "proc_file_read: Apparent buffer overflow!\n");
				n = PAGE_SIZE - startoff;
			}
			if (n > count)
				n = count;
		}

		n -= copy_to_user(buf, start < page ? page : start, n);
		if (n == 0) {
			if (retval == 0)
				retval = -EFAULT;
			break;
		}

		/* Interface 1 advances the offset by *start, the others by
		 * the number of bytes actually copied. */
		*ppos += start < page ? (unsigned long)start : n;
		nbytes -= n;
		buf += n;
		retval += n;
	}
	free_page((unsigned long) page);
	return retval;
}
  186. static ssize_t
  187. proc_file_write(struct file *file, const char __user *buffer,
  188. size_t count, loff_t *ppos)
  189. {
  190. struct inode *inode = file->f_path.dentry->d_inode;
  191. struct proc_dir_entry * dp;
  192. dp = PDE(inode);
  193. if (!dp->write_proc)
  194. return -EIO;
  195. /* FIXME: does this routine need ppos? probably... */
  196. return dp->write_proc(file, buffer, count, dp->data);
  197. }
  198. static loff_t
  199. proc_file_lseek(struct file *file, loff_t offset, int orig)
  200. {
  201. loff_t retval = -EINVAL;
  202. switch (orig) {
  203. case 1:
  204. offset += file->f_pos;
  205. /* fallthrough */
  206. case 0:
  207. if (offset < 0 || offset > MAX_NON_LFS)
  208. break;
  209. file->f_pos = retval = offset;
  210. }
  211. return retval;
  212. }
  213. static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
  214. {
  215. struct inode *inode = dentry->d_inode;
  216. struct proc_dir_entry *de = PDE(inode);
  217. int error;
  218. error = inode_change_ok(inode, iattr);
  219. if (error)
  220. goto out;
  221. error = inode_setattr(inode, iattr);
  222. if (error)
  223. goto out;
  224. de->uid = inode->i_uid;
  225. de->gid = inode->i_gid;
  226. de->mode = inode->i_mode;
  227. out:
  228. return error;
  229. }
  230. static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
  231. struct kstat *stat)
  232. {
  233. struct inode *inode = dentry->d_inode;
  234. struct proc_dir_entry *de = PROC_I(inode)->pde;
  235. if (de && de->nlink)
  236. inode->i_nlink = de->nlink;
  237. generic_fillattr(inode, stat);
  238. return 0;
  239. }
/* Default inode operations for regular /proc files: only attribute
 * changes (chmod/chown) need special handling, via proc_notify_change(). */
static const struct inode_operations proc_file_inode_operations = {
	.setattr	= proc_notify_change,
};
  243. /*
  244. * This function parses a name such as "tty/driver/serial", and
  245. * returns the struct proc_dir_entry for "/proc/tty/driver", and
  246. * returns "serial" in residual.
  247. */
  248. static int xlate_proc_name(const char *name,
  249. struct proc_dir_entry **ret, const char **residual)
  250. {
  251. const char *cp = name, *next;
  252. struct proc_dir_entry *de;
  253. int len;
  254. int rtn = 0;
  255. spin_lock(&proc_subdir_lock);
  256. de = &proc_root;
  257. while (1) {
  258. next = strchr(cp, '/');
  259. if (!next)
  260. break;
  261. len = next - cp;
  262. for (de = de->subdir; de ; de = de->next) {
  263. if (proc_match(len, cp, de))
  264. break;
  265. }
  266. if (!de) {
  267. rtn = -ENOENT;
  268. goto out;
  269. }
  270. cp += len + 1;
  271. }
  272. *residual = cp;
  273. *ret = de;
  274. out:
  275. spin_unlock(&proc_subdir_lock);
  276. return rtn;
  277. }
/* IDR pool handing out the dynamic part of /proc inode numbers. */
static DEFINE_IDR(proc_inum_idr);
static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */

/* Inode numbers below this value are statically assigned, not IDR-managed. */
#define PROC_DYNAMIC_FIRST 0xF0000000UL
  281. /*
  282. * Return an inode number between PROC_DYNAMIC_FIRST and
  283. * 0xffffffff, or zero on failure.
  284. */
  285. static unsigned int get_inode_number(void)
  286. {
  287. int i, inum = 0;
  288. int error;
  289. retry:
  290. if (idr_pre_get(&proc_inum_idr, GFP_KERNEL) == 0)
  291. return 0;
  292. spin_lock(&proc_inum_lock);
  293. error = idr_get_new(&proc_inum_idr, NULL, &i);
  294. spin_unlock(&proc_inum_lock);
  295. if (error == -EAGAIN)
  296. goto retry;
  297. else if (error)
  298. return 0;
  299. inum = (i & MAX_ID_MASK) + PROC_DYNAMIC_FIRST;
  300. /* inum will never be more than 0xf0ffffff, so no check
  301. * for overflow.
  302. */
  303. return inum;
  304. }
  305. static void release_inode_number(unsigned int inum)
  306. {
  307. int id = (inum - PROC_DYNAMIC_FIRST) | ~MAX_ID_MASK;
  308. spin_lock(&proc_inum_lock);
  309. idr_remove(&proc_inum_idr, id);
  310. spin_unlock(&proc_inum_lock);
  311. }
  312. static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
  313. {
  314. nd_set_link(nd, PDE(dentry->d_inode)->data);
  315. return NULL;
  316. }
/* Inode operations for /proc symlinks created by proc_symlink(). */
static const struct inode_operations proc_link_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= proc_follow_link,
};
/*
 * As some entries in /proc are volatile, we want to
 * get rid of unused dentries. This could be made
 * smarter: we could keep a "volatile" flag in the
 * inode to indicate which ones to keep.
 */
static int proc_delete_dentry(struct dentry * dentry)
{
	/* Always drop /proc dentries as soon as their refcount hits zero. */
	return 1;
}

static struct dentry_operations proc_dentry_operations =
{
	.d_delete	= proc_delete_dentry,
};
/*
 * Don't create negative dentries here, return -ENOENT by hand
 * instead.
 */
struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode = NULL;
	struct proc_dir_entry * de;
	int error = -ENOENT;

	lock_kernel();
	spin_lock(&proc_subdir_lock);
	de = PDE(dir);
	if (de) {
		/* Linear scan of the directory's children for a name match. */
		for (de = de->subdir; de ; de = de->next) {
			if (de->namelen != dentry->d_name.len)
				continue;
			if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
				unsigned int ino;

				/* The entry may redirect to a per-task shadow. */
				if (de->shadow_proc)
					de = de->shadow_proc(current, de);
				ino = de->low_ino;
				/* Pin the entry, then drop the lock:
				 * proc_get_inode() may sleep. */
				de_get(de);
				spin_unlock(&proc_subdir_lock);
				error = -EINVAL;
				inode = proc_get_inode(dir->i_sb, ino, de);
				spin_lock(&proc_subdir_lock);
				break;
			}
		}
	}
	spin_unlock(&proc_subdir_lock);
	unlock_kernel();

	if (inode) {
		dentry->d_op = &proc_dentry_operations;
		d_add(dentry, inode);
		return NULL;
	}
	/* Drop the reference taken above on failure.  NOTE(review): when no
	 * match was found, de is NULL here -- this relies on de_put()
	 * tolerating a NULL argument; confirm against de_put()'s definition. */
	de_put(de);
	return ERR_PTR(error);
}
/*
 * This returns non-zero if at EOF, so that the /proc
 * root directory can use this and check if it should
 * continue with the <pid> entries..
 *
 * Note that the VFS-layer doesn't care about the return
 * value of the readdir() call, as long as it's non-negative
 * for success..
 */
int proc_readdir(struct file * filp,
	void * dirent, filldir_t filldir)
{
	struct proc_dir_entry * de;
	unsigned int ino;
	int i;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int ret = 0;

	lock_kernel();

	ino = inode->i_ino;
	de = PDE(inode);
	if (!de) {
		ret = -EINVAL;
		goto out;
	}
	i = filp->f_pos;
	switch (i) {
		case 0:
			/* f_pos 0: emit "." */
			if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		case 1:
			/* f_pos 1: emit ".." */
			if (filldir(dirent, "..", 2, i,
				    parent_ino(filp->f_path.dentry),
				    DT_DIR) < 0)
				goto out;
			i++;
			filp->f_pos++;
			/* fall through */
		default:
			spin_lock(&proc_subdir_lock);
			de = de->subdir;
			i -= 2;
			/* Skip entries already consumed by earlier calls
			 * (f_pos 2 maps to the first child). */
			for (;;) {
				if (!de) {
					ret = 1;
					spin_unlock(&proc_subdir_lock);
					goto out;
				}
				if (!i)
					break;
				de = de->next;
				i--;
			}

			do {
				struct proc_dir_entry *next;

				/* filldir passes info to user space */
				de_get(de);
				/* Drop the lock around filldir(): copying to
				 * userspace may sleep; de stays pinned. */
				spin_unlock(&proc_subdir_lock);
				if (filldir(dirent, de->name, de->namelen, filp->f_pos,
					    de->low_ino, de->mode >> 12) < 0) {
					de_put(de);
					goto out;
				}
				spin_lock(&proc_subdir_lock);
				filp->f_pos++;
				next = de->next;
				de_put(de);
				de = next;
			} while (de);
			spin_unlock(&proc_subdir_lock);
	}
	ret = 1;
out:	unlock_kernel();
	return ret;
}
/*
 * These are the generic /proc directory operations. They
 * use the in-memory "struct proc_dir_entry" tree to parse
 * the /proc directory.
 */
static const struct file_operations proc_dir_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_readdir,
};
/*
 * proc directories can do almost nothing..
 */
static const struct inode_operations proc_dir_inode_operations = {
	.lookup		= proc_lookup,
	.getattr	= proc_getattr,
	.setattr	= proc_notify_change,
};
  469. static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
  470. {
  471. unsigned int i;
  472. i = get_inode_number();
  473. if (i == 0)
  474. return -EAGAIN;
  475. dp->low_ino = i;
  476. if (S_ISDIR(dp->mode)) {
  477. if (dp->proc_iops == NULL) {
  478. dp->proc_fops = &proc_dir_operations;
  479. dp->proc_iops = &proc_dir_inode_operations;
  480. }
  481. dir->nlink++;
  482. } else if (S_ISLNK(dp->mode)) {
  483. if (dp->proc_iops == NULL)
  484. dp->proc_iops = &proc_link_inode_operations;
  485. } else if (S_ISREG(dp->mode)) {
  486. if (dp->proc_fops == NULL)
  487. dp->proc_fops = &proc_file_operations;
  488. if (dp->proc_iops == NULL)
  489. dp->proc_iops = &proc_file_inode_operations;
  490. }
  491. spin_lock(&proc_subdir_lock);
  492. dp->next = dir->subdir;
  493. dp->parent = dir;
  494. dir->subdir = dp;
  495. spin_unlock(&proc_subdir_lock);
  496. return 0;
  497. }
  498. static struct proc_dir_entry *proc_create(struct proc_dir_entry **parent,
  499. const char *name,
  500. mode_t mode,
  501. nlink_t nlink)
  502. {
  503. struct proc_dir_entry *ent = NULL;
  504. const char *fn = name;
  505. int len;
  506. /* make sure name is valid */
  507. if (!name || !strlen(name)) goto out;
  508. if (!(*parent) && xlate_proc_name(name, parent, &fn) != 0)
  509. goto out;
  510. /* At this point there must not be any '/' characters beyond *fn */
  511. if (strchr(fn, '/'))
  512. goto out;
  513. len = strlen(fn);
  514. ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
  515. if (!ent) goto out;
  516. memset(ent, 0, sizeof(struct proc_dir_entry));
  517. memcpy(((char *) ent) + sizeof(struct proc_dir_entry), fn, len + 1);
  518. ent->name = ((char *) ent) + sizeof(*ent);
  519. ent->namelen = len;
  520. ent->mode = mode;
  521. ent->nlink = nlink;
  522. atomic_set(&ent->count, 1);
  523. ent->pde_users = 0;
  524. spin_lock_init(&ent->pde_unload_lock);
  525. ent->pde_unload_completion = NULL;
  526. out:
  527. return ent;
  528. }
  529. struct proc_dir_entry *proc_symlink(const char *name,
  530. struct proc_dir_entry *parent, const char *dest)
  531. {
  532. struct proc_dir_entry *ent;
  533. ent = proc_create(&parent,name,
  534. (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1);
  535. if (ent) {
  536. ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL);
  537. if (ent->data) {
  538. strcpy((char*)ent->data,dest);
  539. if (proc_register(parent, ent) < 0) {
  540. kfree(ent->data);
  541. kfree(ent);
  542. ent = NULL;
  543. }
  544. } else {
  545. kfree(ent);
  546. ent = NULL;
  547. }
  548. }
  549. return ent;
  550. }
  551. struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
  552. struct proc_dir_entry *parent)
  553. {
  554. struct proc_dir_entry *ent;
  555. ent = proc_create(&parent, name, S_IFDIR | mode, 2);
  556. if (ent) {
  557. if (proc_register(parent, ent) < 0) {
  558. kfree(ent);
  559. ent = NULL;
  560. }
  561. }
  562. return ent;
  563. }
/* Create a /proc directory with the default r-xr-xr-x permissions. */
struct proc_dir_entry *proc_mkdir(const char *name,
		struct proc_dir_entry *parent)
{
	return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
}
  569. struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
  570. struct proc_dir_entry *parent)
  571. {
  572. struct proc_dir_entry *ent;
  573. nlink_t nlink;
  574. if (S_ISDIR(mode)) {
  575. if ((mode & S_IALLUGO) == 0)
  576. mode |= S_IRUGO | S_IXUGO;
  577. nlink = 2;
  578. } else {
  579. if ((mode & S_IFMT) == 0)
  580. mode |= S_IFREG;
  581. if ((mode & S_IALLUGO) == 0)
  582. mode |= S_IRUGO;
  583. nlink = 1;
  584. }
  585. ent = proc_create(&parent,name,mode,nlink);
  586. if (ent) {
  587. if (proc_register(parent, ent) < 0) {
  588. kfree(ent);
  589. ent = NULL;
  590. }
  591. }
  592. return ent;
  593. }
  594. void free_proc_entry(struct proc_dir_entry *de)
  595. {
  596. unsigned int ino = de->low_ino;
  597. if (ino < PROC_DYNAMIC_FIRST)
  598. return;
  599. release_inode_number(ino);
  600. if (S_ISLNK(de->mode) && de->data)
  601. kfree(de->data);
  602. kfree(de);
  603. }
/*
 * Remove a /proc entry and free it if it's not currently in use.
 */
void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
{
	struct proc_dir_entry **p;
	struct proc_dir_entry *de;
	const char *fn = name;
	int len;

	/* Resolve "dir/sub/name" style paths when no parent was given. */
	if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
		goto out;
	len = strlen(fn);

	spin_lock(&proc_subdir_lock);
	for (p = &parent->subdir; *p; p=&(*p)->next ) {
		if (!proc_match(len, fn, *p))
			continue;
		/* Unlink the entry so no new lookups can reach it. */
		de = *p;
		*p = de->next;
		de->next = NULL;

		spin_lock(&de->pde_unload_lock);
		/*
		 * Stop accepting new callers into module. If you're
		 * dynamically allocating ->proc_fops, save a pointer somewhere.
		 */
		de->proc_fops = NULL;
		/* Wait until all existing callers into module are done. */
		if (de->pde_users > 0) {
			DECLARE_COMPLETION_ONSTACK(c);

			if (!de->pde_unload_completion)
				de->pde_unload_completion = &c;

			spin_unlock(&de->pde_unload_lock);
			spin_unlock(&proc_subdir_lock);

			/* Presumably completed by the last in-flight caller
			 * dropping pde_users to zero -- see the pde_users
			 * accounting at the call sites. */
			wait_for_completion(de->pde_unload_completion);

			spin_lock(&proc_subdir_lock);
			goto continue_removing;
		}
		spin_unlock(&de->pde_unload_lock);

continue_removing:
		if (S_ISDIR(de->mode))
			parent->nlink--;	/* undo the ".." link from proc_register() */
		de->nlink = 0;
		WARN_ON(de->subdir);	/* children must be removed first */
		if (atomic_dec_and_test(&de->count))
			free_proc_entry(de);
		break;
	}
	spin_unlock(&proc_subdir_lock);
out:
	return;
}