dir.c

#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).
 * The exception to this is open_root_dentry(), which will open the
 * mount point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	if (dentry->d_parent == NULL ||   /* nfs fh_to_dentry */
	    ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		dentry->d_op = &ceph_dentry_ops;
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		dentry->d_op = &ceph_snapdir_dentry_ops;
	else
		dentry->d_op = &ceph_snap_dentry_ops;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}
	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_fsdata = di;
	dentry->d_time = jiffies;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
  60. /*
  61. * for readdir, we encode the directory frag and offset within that
  62. * frag into f_pos.
  63. */
  64. static unsigned fpos_frag(loff_t p)
  65. {
  66. return p >> 32;
  67. }
  68. static unsigned fpos_off(loff_t p)
  69. {
  70. return p & 0xffffffff;
  71. }
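
/*
 * Example of the encoding (a sketch; the composing helper
 * ceph_make_fpos() is assumed to live in super.h and to pack the frag
 * into the high 32 bits and the offset into the low 32 bits, matching
 * the two helpers above):
 *
 *	loff_t pos = ceph_make_fpos(0xa000, 7);
 *	fpos_frag(pos);	-- 0xa000, the directory fragment
 *	fpos_off(pos);	-- 7, the entry offset within that fragment
 */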

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * I_COMPLETE indicates we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
			    void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct dentry *parent = filp->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
	     last);

	spin_lock(&dcache_lock);

	/* start at beginning? */
	if (filp->f_pos == 2 || last == NULL ||
	    filp->f_pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->at_end = 1;
			goto out_unlock;
		}
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    filp->f_pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	atomic_inc(&dentry->d_count);
	spin_unlock(&dcache_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	filp->f_pos = di->offset;
	err = filldir(dirent, dentry->d_name.name,
		      dentry->d_name.len, di->offset,
		      dentry->d_inode->i_ino,
		      dentry->d_inode->i_mode >> 12);

	if (last) {
		if (err < 0) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		} else {
			dput(last);
		}
	}
	last = dentry;

	if (err < 0)
		goto out;

	filp->f_pos++;

	/* make sure a dentry wasn't dropped while we didn't have dcache_lock */
	if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
		dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&dcache_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&dcache_lock);
out:
	if (last)
		dput(last);
	return err;
}
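
/*
 * A -EAGAIN return from __dcache_readdir() is not an error; the caller
 * (ceph_readdir(), below) treats it as "the dcache can no longer
 * satisfy this listing" and falls back to a normal MDS readdir.
 */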

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}
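
/*
 * fi->last_name recorded here is later handed to the MDS as r_path2 in
 * ceph_readdir(), so the next READDIR request resumes after this entry
 * rather than restarting the fragment from the beginning.
 */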

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(filp->f_pos);
	int off = fpos_off(filp->f_pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

	dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
	if (fi->at_end)
		return 0;

	/* always start with . and .. */
	if (filp->f_pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = ci->i_release_count;

		dout("readdir off 0 -> '.'\n");
		if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
			    inode->i_ino, inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 1;
		off = 1;
	}
	if (filp->f_pos == 1) {
		dout("readdir off 1 -> '..'\n");
		if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
			    filp->f_dentry->d_parent->d_inode->i_ino,
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&inode->i_lock);
	if ((filp->f_pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&inode->i_lock);
		err = __dcache_readdir(filp, dirent, filldir);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&inode->i_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = igrab(inode);
		req->r_dentry = dget(filp->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			fi->dir_release_count--;    /* preclude I_COMPLETE */
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);
	while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
		u64 pos = ceph_make_fpos(frag, off);
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (filldir(dirent,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    pos, ino, ftype) < 0) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		filp->f_pos = pos + 1;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		filp->f_pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->at_end = 1;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&inode->i_lock);
	if (ci->i_release_count == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		ci->i_ceph_flags |= CEPH_I_COMPLETE;
		ci->i_max_offset = filp->f_pos;
	}
	spin_unlock(&inode->i_lock);

	dout("readdir %p filp %p done.\n", inode, filp);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2;  /* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->at_end = 0;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->at_end = 0;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Process result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode;

	/* .snap dir? */
	if (err == -ENOENT &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}

	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}
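
/*
 * The return convention above mirrors what the VFS expects from
 * ->lookup(): NULL means "use the dentry you passed me", a non-NULL
 * dentry means "use this (spliced) dentry instead", and an ERR_PTR
 * propagates the error.
 */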

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* open (but not create!) intent? */
	if (nd &&
	    (nd->flags & LOOKUP_OPEN) &&
	    (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */
	    !(nd->intent.open.flags & O_CREAT)) {
		int mode = nd->intent.open.create_mode & ~current->fs->umask;
		return ceph_lookup_open(dir, dentry, nd, mode, 1);
	}

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&dir->i_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&dir->i_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&dir->i_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, NULL);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      int mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
		       struct nameidata *nd)
{
	dout("create in dir %p dentry %p name '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	if (nd) {
		BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
		dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
		/* hrm, what should i do here if we get aliased? */
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		return 0;
	}

	/* fall back to mknod */
	return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err < 0)
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err)
		d_drop(dentry);
	else if (!req->r_reply_info.head->is_dentry)
		d_instantiate(dentry, igrab(old_dentry->d_inode));
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&inode->i_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&inode->i_lock);
	return drop;
}
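
/*
 * The mask computed above is handed to the MDS via req->r_inode_drop in
 * ceph_unlink() and ceph_rename() below, so the caps can be released as
 * part of the unlink/rename request itself rather than in a separate
 * message.
 */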

/*
 * rmdir and unlink differ only in the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_i_clear(new_dir, CEPH_I_COMPLETE);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di && di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_cap_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_cap_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&dir->i_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&dir->i_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}
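
/*
 * ceph_d_revalidate (below) accepts either form of coverage: a
 * per-dentry lease from the MDS, or a directory-wide
 * CEPH_CAP_FILE_SHARED cap whose shared_gen still matches the one
 * recorded on the dentry.
 */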

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *dir = dentry->d_parent->d_inode;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		goto out_touch;
	}
	if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
		goto out_touch;

	if (dentry_lease_is_valid(dentry) ||
	    dir_lease_is_valid(dir, dentry))
		goto out_touch;

	dout("d_revalidate %p invalid\n", dentry);
	d_drop(dentry);
	return 0;
out_touch:
	ceph_dentry_lru_touch(dentry);
	return 1;
}

/*
 * When a dentry is released, clear the dir I_COMPLETE if it was part
 * of the current dir gen or if this is in the snapshot namespace.
 */
static void ceph_dentry_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	struct inode *parent_inode = NULL;
	u64 snapid = CEPH_NOSNAP;

	if (!IS_ROOT(dentry)) {
		parent_inode = dentry->d_parent->d_inode;
		if (parent_inode)
			snapid = ceph_snap(parent_inode);
	}
	dout("dentry_release %p parent %p\n", dentry, parent_inode);
	if (parent_inode && snapid != CEPH_SNAPDIR) {
		struct ceph_inode_info *ci = ceph_inode(parent_inode);

		spin_lock(&parent_inode->i_lock);
		if (ci->i_shared_gen == di->lease_shared_gen ||
		    snapid <= CEPH_MAXSNAP) {
			dout(" clearing %p complete (d_release)\n",
			     parent_inode);
			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
			ci->i_release_count++;
		}
		spin_unlock(&parent_inode->i_lock);
	}
	if (di) {
		ceph_dentry_lru_del(dentry);
		if (di->lease_session)
			ceph_put_mds_session(di->lease_session);
		kmem_cache_free(ceph_dentry_cachep, di);
		dentry->d_fsdata = NULL;
	}
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     struct nameidata *nd)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(1024, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			sprintf(cf->dir_info,
				"entries: %20lld\n"
				" files: %20lld\n"
				" subdirs: %20lld\n"
				"rentries: %20lld\n"
				" rfiles: %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes: %20lld\n"
				"rctime: %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}
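
/*
 * Usage note (an illustration, not enforced by this file): with the
 * filesystem mounted '-o dirstat', reading a directory as if it were a
 * regular file (e.g. cat'ing a directory path) returns the recursive
 * stats formatted above instead of failing with -EISDIR.
 */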

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, int datasync)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		spin_lock(&ci->i_unsafe_lock);
		ceph_mdsc_put_request(req);

		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	return ret;
}
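
/*
 * The loop above waits on each request's r_safe_completion, i.e. the
 * MDS's "safe" reply indicating the operation has been committed.
 * Only requests with tids up to the last_tid sampled at entry are
 * waited for; operations issued after the fsync() began are not.
 */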

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_add_tail(&di->lru, &mdsc->dentry_lru);
		mdsc->num_dentry++;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_move_tail(&di->lru, &mdsc->dentry_lru);
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_del_init(&di->lru);
		mdsc->num_dentry--;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.readdir = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_dentry_release,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_dentry_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_dentry_release,
};