dcache.c

/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dcache.c
 *
 * dentry cache handling code
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/namei.h>

#define MLOG_MASK_PREFIX ML_DCACHE
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"

static int ocfs2_dentry_revalidate(struct dentry *dentry,
                                   struct nameidata *nd)
{
        struct inode *inode = dentry->d_inode;
        int ret = 0;    /* if all else fails, just return false */
        struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

        mlog_entry("(0x%p, '%.*s')\n", dentry,
                   dentry->d_name.len, dentry->d_name.name);

        /* Never trust a negative dentry - force a new lookup. */
        if (inode == NULL) {
                mlog(0, "negative dentry: %.*s\n", dentry->d_name.len,
                     dentry->d_name.name);
                goto bail;
        }

        BUG_ON(!osb);

        if (inode == osb->root_inode || is_bad_inode(inode))
                goto bail;

        spin_lock(&OCFS2_I(inode)->ip_lock);
        /* did we or someone else delete this inode? */
        if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
                spin_unlock(&OCFS2_I(inode)->ip_lock);
                mlog(0, "inode (%llu) deleted, returning false\n",
                     (unsigned long long)OCFS2_I(inode)->ip_blkno);
                goto bail;
        }
        spin_unlock(&OCFS2_I(inode)->ip_lock);

        /*
         * We don't need a cluster lock to test this because once an
         * inode nlink hits zero, it never goes back.
         */
        if (inode->i_nlink == 0) {
                mlog(0, "Inode %llu orphaned, returning false "
                     "dir = %d\n",
                     (unsigned long long)OCFS2_I(inode)->ip_blkno,
                     S_ISDIR(inode->i_mode));
                goto bail;
        }

        ret = 1;

bail:
        mlog_exit(ret);

        return ret;
}

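/*
 * Helper for ocfs2_find_local_alias(): returns nonzero if @dentry is a
 * live alias sitting under the directory identified by @parent_blkno.
 * Callers may additionally ask to skip unhashed dentries.
 */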
static int ocfs2_match_dentry(struct dentry *dentry,
                              u64 parent_blkno,
                              int skip_unhashed)
{
        struct inode *parent;

        /*
         * ocfs2_lookup() does a d_splice_alias() _before_ attaching
         * to the lock data, so we skip those here, otherwise
         * ocfs2_dentry_attach_lock() will get its original dentry
         * back.
         */
        if (!dentry->d_fsdata)
                return 0;

        if (!dentry->d_parent)
                return 0;

        if (skip_unhashed && d_unhashed(dentry))
                return 0;

        parent = dentry->d_parent->d_inode;
        /* Negative parent dentry? */
        if (!parent)
                return 0;

        /* Name is in a different directory. */
        if (OCFS2_I(parent)->ip_blkno != parent_blkno)
                return 0;

        return 1;
}

/*
 * Walk the inode alias list, and find a dentry which has a given
 * parent. ocfs2_dentry_attach_lock() wants to find _any_ alias as it
 * is looking for a dentry_lock reference. The vote thread is looking
 * to unhash aliases, so we allow it to skip any that already have
 * that property.
 */
struct dentry *ocfs2_find_local_alias(struct inode *inode,
                                      u64 parent_blkno,
                                      int skip_unhashed)
{
        struct list_head *p;
        struct dentry *dentry = NULL;

        spin_lock(&dcache_lock);

        list_for_each(p, &inode->i_dentry) {
                dentry = list_entry(p, struct dentry, d_alias);

                if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
                        mlog(0, "dentry found: %.*s\n",
                             dentry->d_name.len, dentry->d_name.name);

                        dget_locked(dentry);
                        break;
                }

                dentry = NULL;
        }

        spin_unlock(&dcache_lock);

        return dentry;
}

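/*
 * Serializes attaching and detaching a dentry lock (dentry->d_fsdata
 * and dl->dl_count), since a dput() can happen asynchronously due to
 * pruning. See the comment above ocfs2_dentry_attach_lock() below.
 */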
DEFINE_SPINLOCK(dentry_attach_lock);

/*
 * Attach this dentry to a cluster lock.
 *
 * Dentry locks cover all links in a given directory to a particular
 * inode. We do this so that ocfs2 can build a lock name which all
 * nodes in the cluster can agree on at all times. Shoving full names
 * in the cluster lock won't work due to size restrictions. Covering
 * links inside of a directory is a good compromise because it still
 * allows us to use the parent directory lock to synchronize
 * operations.
 *
 * Call this function with the parent dir semaphore and the parent dir
 * cluster lock held.
 *
 * The dir semaphore will protect us from having to worry about
 * concurrent processes on our node trying to attach a lock at the
 * same time.
 *
 * The dir cluster lock (held at either PR or EX mode) protects us
 * from unlink and rename on other nodes.
 *
 * A dput() can happen asynchronously due to pruning, so we cover
 * attaching and detaching the dentry lock with a
 * dentry_attach_lock.
 *
 * A node which has done lookup on a name retains a protected read
 * lock until final dput. If the user requests an unlink or rename,
 * the protected read is upgraded to an exclusive lock. Other nodes
 * who have seen the dentry will then be informed that they need to
 * downgrade their lock, which will involve d_delete on the
 * dentry. This happens in ocfs2_dentry_convert_worker().
 */
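
/*
 * Illustrative usage (a sketch, not copied from any caller): a
 * lookup-style path holds the parent directory's i_mutex and its
 * cluster lock, resolves the child inode, splices the dentry, and
 * only then attaches the dentry lock:
 *
 *	ret = d_splice_alias(inode, dentry);
 *	status = ocfs2_dentry_attach_lock(ret ? ret : dentry, inode,
 *					  OCFS2_I(dir)->ip_blkno);
 *	if (status)
 *		mlog_errno(status);
 *
 * The d_splice_alias() ordering matters: ocfs2_find_local_alias()
 * skips aliases without d_fsdata, as noted in ocfs2_match_dentry().
 */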
int ocfs2_dentry_attach_lock(struct dentry *dentry,
                             struct inode *inode,
                             u64 parent_blkno)
{
        int ret;
        struct dentry *alias;
        struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

        mlog(0, "Attach \"%.*s\", parent %llu, fsdata: %p\n",
             dentry->d_name.len, dentry->d_name.name,
             (unsigned long long)parent_blkno, dl);

        /*
         * Negative dentry. We ignore these for now.
         *
         * XXX: Could we improve ocfs2_dentry_revalidate() by
         * tracking these?
         */
        if (!inode)
                return 0;

        if (dl) {
                mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
                                " \"%.*s\": old parent: %llu, new: %llu\n",
                                dentry->d_name.len, dentry->d_name.name,
                                (unsigned long long)parent_blkno,
                                (unsigned long long)dl->dl_parent_blkno);
                return 0;
        }

        alias = ocfs2_find_local_alias(inode, parent_blkno, 0);
        if (alias) {
                /*
                 * Great, an alias exists, which means we must have a
                 * dentry lock already. We can just grab the lock off
                 * the alias and add it to the list.
                 *
                 * We're depending here on the fact that this dentry
                 * was found and exists in the dcache and so must have
                 * a reference to the dentry_lock because we can't
                 * race creates. Final dput() cannot happen on it
                 * since we have it pinned, so our reference is safe.
                 */
                dl = alias->d_fsdata;
                mlog_bug_on_msg(!dl, "parent %llu, ino %llu\n",
                                (unsigned long long)parent_blkno,
                                (unsigned long long)OCFS2_I(inode)->ip_blkno);

                mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
                                " \"%.*s\": old parent: %llu, new: %llu\n",
                                dentry->d_name.len, dentry->d_name.name,
                                (unsigned long long)parent_blkno,
                                (unsigned long long)dl->dl_parent_blkno);

                mlog(0, "Found: %s\n", dl->dl_lockres.l_name);

                goto out_attach;
        }

        /*
         * There are no other aliases
         */
        dl = kmalloc(sizeof(*dl), GFP_NOFS);
        if (!dl) {
                ret = -ENOMEM;
                mlog_errno(ret);
                return ret;
        }

        dl->dl_count = 0;
        /*
         * Does this have to happen below, for all attaches, in case
         * the struct inode gets blown away by votes?
         */
        dl->dl_inode = igrab(inode);
        dl->dl_parent_blkno = parent_blkno;
        ocfs2_dentry_lock_res_init(dl, parent_blkno, inode);

out_attach:
        spin_lock(&dentry_attach_lock);
        dentry->d_fsdata = dl;
        dl->dl_count++;
        spin_unlock(&dentry_attach_lock);

        /*
         * This actually gets us our PRMODE level lock. From now on,
         * we'll have a notification if one of these names is
         * destroyed on another node.
         */
        ret = ocfs2_dentry_lock(dentry, 0);
        if (!ret)
                ocfs2_dentry_unlock(dentry, 0);
        else
                mlog_errno(ret);

        dput(alias);

        return ret;
}

/*
 * ocfs2_dentry_iput() and friends.
 *
 * At this point, our particular dentry is detached from the inode's
 * alias list, so there's no way that the locking code can find it.
 *
 * The interesting stuff happens when we determine that our lock needs
 * to go away because this is the last subdir alias in the
 * system. This function needs to handle a couple things:
 *
 * 1) Synchronizing lock shutdown with the downconvert threads. This
 *    is already handled for us via the lockres release drop function
 *    called in ocfs2_release_dentry_lock()
 *
 * 2) A race may occur when we're doing our lock shutdown and
 *    another process wants to create a new dentry lock. Right now we
 *    let them race, which means that for a very short while, this
 *    node might have two locks on a lock resource. This shouldn't be
 *    a problem though because one of them is in the process of being
 *    thrown out.
 */
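
/*
 * Tear down the cluster lock resource and drop the inode reference
 * taken in ocfs2_dentry_attach_lock(), once the last dl_count
 * reference has gone away.
 */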
static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
                                   struct ocfs2_dentry_lock *dl)
{
        ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
        ocfs2_lock_res_free(&dl->dl_lockres);
        iput(dl->dl_inode);
        kfree(dl);
}

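/*
 * Drop one reference on a dentry lock. dl_count is protected by
 * dentry_attach_lock; the final put is what actually triggers
 * ocfs2_drop_dentry_lock().
 */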
void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
                           struct ocfs2_dentry_lock *dl)
{
        int unlock = 0;

        BUG_ON(dl->dl_count == 0);

        spin_lock(&dentry_attach_lock);
        dl->dl_count--;
        unlock = !dl->dl_count;
        spin_unlock(&dentry_attach_lock);

        if (unlock)
                ocfs2_drop_dentry_lock(osb, dl);
}

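/*
 * ->d_iput() callback: release our reference on the dentry lock, if
 * one was attached, before handing the inode back via iput().
 */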
static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode)
{
        struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

        mlog_bug_on_msg(!dl && !(dentry->d_flags & DCACHE_DISCONNECTED),
                        "dentry: %.*s\n", dentry->d_name.len,
                        dentry->d_name.name);

        if (!dl)
                goto out;

        mlog_bug_on_msg(dl->dl_count == 0, "dentry: %.*s, count: %u\n",
                        dentry->d_name.len, dentry->d_name.name,
                        dl->dl_count);

        ocfs2_dentry_lock_put(OCFS2_SB(dentry->d_sb), dl);

out:
        iput(inode);
}

/*
 * d_move(), but keep the locks in sync.
 *
 * When we are done, "dentry" will have the parent dir and name of
 * "target", which will be thrown away.
 *
 * We manually update the lock of "dentry" if need be.
 *
 * "target" doesn't have its dentry lock touched - we allow the later
 * dput() to handle this for us.
 *
 * This is called during ocfs2_rename(), while holding parent
 * directory locks. The dentries have already been deleted on other
 * nodes via ocfs2_remote_dentry_delete().
 *
 * Normally, the VFS handles the d_move() for the file system, after
 * the ->rename() callback. OCFS2 wants to handle this internally, so
 * the new lock can be created atomically with respect to the cluster.
 */
void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target,
                       struct inode *old_dir, struct inode *new_dir)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(old_dir->i_sb);
        struct inode *inode = dentry->d_inode;

        /*
         * Move within the same directory, so the actual lock info won't
         * change.
         *
         * XXX: Is there any advantage to dropping the lock here?
         */
        if (old_dir == new_dir)
                goto out_move;

        ocfs2_dentry_lock_put(osb, dentry->d_fsdata);

        dentry->d_fsdata = NULL;
        ret = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(new_dir)->ip_blkno);
        if (ret)
                mlog_errno(ret);

out_move:
        d_move(dentry, target);
}

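/*
 * Dentry operations wired up for ocfs2: ->d_revalidate forces a new
 * lookup when a cached dentry can no longer be trusted, and ->d_iput
 * drops the dentry lock reference along with the inode.
 */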
struct dentry_operations ocfs2_dentry_ops = {
        .d_revalidate   = ocfs2_dentry_revalidate,
        .d_iput         = ocfs2_dentry_iput,
};