#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		dentry->d_op = &ceph_dentry_ops;
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		dentry->d_op = &ceph_snapdir_dentry_ops;
	else
		dentry->d_op = &ceph_snap_dentry_ops;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}
	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_fsdata = di;
	dentry->d_time = jiffies;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
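
/*
 * Note on the d_op selection above: dentries whose parent is outside any
 * snapshot (CEPH_NOSNAP) get the full ceph_dentry_ops, entries of the
 * .snap directory itself get ceph_snapdir_dentry_ops, and dentries under
 * a snapshot get ceph_snap_dentry_ops (release only); see the operation
 * tables at the bottom of this file.
 */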

/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}
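
/*
 * A minimal sketch of the other direction, assuming ceph_make_fpos()
 * (defined in super.h) packs the pair back together the obvious way:
 *
 *	f_pos = ((loff_t)frag << 32) | (loff_t)off;
 *
 * so f_pos values order first by fragment, then by offset within it.
 */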

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * I_COMPLETE indicates we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
			    void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct dentry *parent = filp->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
	     last);

	spin_lock(&dcache_lock);

	/* start at beginning? */
	if (filp->f_pos == 2 || (last &&
				 filp->f_pos < ceph_dentry(last)->offset)) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->at_end = 1;
			goto out_unlock;
		}
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    filp->f_pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	atomic_inc(&dentry->d_count);
	spin_unlock(&dcache_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	filp->f_pos = di->offset;
	err = filldir(dirent, dentry->d_name.name,
		      dentry->d_name.len, di->offset,
		      dentry->d_inode->i_ino,
		      dentry->d_inode->i_mode >> 12);

	if (last) {
		if (err < 0) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		} else {
			dput(last);
		}
	}
	last = dentry;

	if (err < 0)
		goto out;

	filp->f_pos++;

	/* make sure a dentry wasn't dropped while we didn't have dcache_lock */
	if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
		dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&dcache_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&dcache_lock);
out:
	if (last)
		dput(last);
	return err;
}
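
/*
 * __dcache_readdir() returns 0 when it reaches the end of d_subdirs or
 * when filldir asks us to stop (our position is remembered in fi->dentry
 * and fi->next_offset), and -EAGAIN if I_COMPLETE was lost mid-walk, in
 * which case ceph_readdir() below falls back to querying the MDS.
 */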

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(filp->f_pos);
	int off = fpos_off(filp->f_pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

	dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
	if (fi->at_end)
		return 0;

	/* always start with . and .. */
	if (filp->f_pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = ci->i_release_count;

		dout("readdir off 0 -> '.'\n");
		if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
			    inode->i_ino, inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 1;
		off = 1;
	}
	if (filp->f_pos == 1) {
		dout("readdir off 1 -> '..'\n");
		if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
			    filp->f_dentry->d_parent->d_inode->i_ino,
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&inode->i_lock);
	if ((filp->f_pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&inode->i_lock);
		err = __dcache_readdir(filp, dirent, filldir);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&inode->i_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = igrab(inode);
		req->r_dentry = dget(filp->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			fi->dir_release_count--;    /* preclude I_COMPLETE */
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);
	while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
		u64 pos = ceph_make_fpos(frag, off);
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (filldir(dirent,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    pos, ino, ftype) < 0) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		filp->f_pos = pos + 1;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		filp->f_pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->at_end = 1;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&inode->i_lock);
	if (ci->i_release_count == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		ci->i_ceph_flags |= CEPH_I_COMPLETE;
		ci->i_max_offset = filp->f_pos;
	}
	spin_unlock(&inode->i_lock);

	dout("readdir %p filp %p done.\n", inode, filp);
	return 0;
}
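
/*
 * State carried across ceph_readdir() calls: fi->frag and fi->last_readdir
 * cache the most recent MDS reply for the current fragment, fi->last_name
 * records where the next MDS query should resume, and fi->offset /
 * fi->next_offset map reply entries back to f_pos offsets.
 */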

static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2;  /* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->at_end = 0;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->at_end = 0;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
	mutex_unlock(&inode->i_mutex);
	return retval;
}
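
/*
 * Note that a forward seek above decrements fi->dir_release_count so that
 * this readdir pass can no longer mark the directory I_COMPLETE (entries
 * may have been skipped); compare the i_release_count check at the end of
 * ceph_readdir().
 */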

/*
 * Process result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode;

	/* .snap dir? */
	if (err == -ENOENT &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}

	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}
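
/*
 * The return convention above matches what the VFS expects from
 * ->lookup(): NULL means "use the dentry you were given", a non-NULL
 * dentry means we were spliced onto an existing alias (which the caller
 * must use instead), and ERR_PTR(err) reports failure.
 */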

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* open (but not create!) intent? */
	if (nd &&
	    (nd->flags & LOOKUP_OPEN) &&
	    (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */
	    !(nd->intent.open.flags & O_CREAT)) {
		int mode = nd->intent.open.create_mode & ~current->fs->umask;
		return ceph_lookup_open(dir, dentry, nd, mode, 1);
	}

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&dir->i_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&dir->i_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&dir->i_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, NULL);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      int mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
		       struct nameidata *nd)
{
	dout("create in dir %p dentry %p name '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	if (nd) {
		BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
		dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
		/* hrm, what should i do here if we get aliased? */
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		return 0;
	}

	/* fall back to mknod */
	return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err < 0)
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err)
		d_drop(dentry);
	else if (!req->r_reply_info.head->is_dentry)
		d_instantiate(dentry, igrab(old_dentry->d_inode));
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&inode->i_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&inode->i_lock);
	return drop;
}
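
/*
 * The mask returned here is passed to the MDS as r_inode_drop (see
 * ceph_unlink() and ceph_rename() below), naming the caps the client is
 * prepared to release as part of the operation.
 */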

/*
 * rmdir and unlink differ only in the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_i_clear(new_dir, CEPH_I_COMPLETE);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di && di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_cap_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_cap_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}
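
/*
 * Note that the lease renewal message above is sent only after d_lock has
 * been dropped, so ceph_mdsc_lease_send_msg() never runs under the dentry
 * spinlock.
 */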

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&dir->i_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&dir->i_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *dir = dentry->d_parent->d_inode;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		goto out_touch;
	}
	if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
		goto out_touch;

	if (dentry_lease_is_valid(dentry) ||
	    dir_lease_is_valid(dir, dentry))
		goto out_touch;

	dout("d_revalidate %p invalid\n", dentry);
	d_drop(dentry);
	return 0;
out_touch:
	ceph_dentry_lru_touch(dentry);
	return 1;
}

/*
 * When a dentry is released, clear the dir I_COMPLETE if it was part
 * of the current dir gen or if this is in the snapshot namespace.
 */
static void ceph_dentry_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	struct inode *parent_inode = NULL;
	u64 snapid = CEPH_NOSNAP;

	if (!IS_ROOT(dentry)) {
		parent_inode = dentry->d_parent->d_inode;
		if (parent_inode)
			snapid = ceph_snap(parent_inode);
	}
	dout("dentry_release %p parent %p\n", dentry, parent_inode);
	if (parent_inode && snapid != CEPH_SNAPDIR) {
		struct ceph_inode_info *ci = ceph_inode(parent_inode);

		spin_lock(&parent_inode->i_lock);
		if (ci->i_shared_gen == di->lease_shared_gen ||
		    snapid <= CEPH_MAXSNAP) {
			dout(" clearing %p complete (d_release)\n",
			     parent_inode);
			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
			ci->i_release_count++;
		}
		spin_unlock(&parent_inode->i_lock);
	}
	if (di) {
		ceph_dentry_lru_del(dentry);
		if (di->lease_session)
			ceph_put_mds_session(di->lease_session);
		kmem_cache_free(ceph_dentry_cachep, di);
		dentry->d_fsdata = NULL;
	}
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     struct nameidata *nd)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(1024, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			sprintf(cf->dir_info,
				"entries: %20lld\n"
				" files: %20lld\n"
				" subdirs: %20lld\n"
				"rentries: %20lld\n"
				" rfiles: %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes: %20lld\n"
				"rctime: %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, int datasync)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		spin_lock(&ci->i_unsafe_lock);
		ceph_mdsc_put_request(req);

		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	return ret;
}
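
/*
 * ceph_dir_fsync() above walks i_unsafe_dirops and waits on each request's
 * r_safe_completion, i.e. until the MDS reports the operation safe
 * (committed), stopping once it passes the tid that was newest when the
 * fsync started.
 */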

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_add_tail(&di->lru, &mdsc->dentry_lru);
		mdsc->num_dentry++;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_move_tail(&di->lru, &mdsc->dentry_lru);
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_del_init(&di->lru);
		mdsc->num_dentry--;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.readdir = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_dentry_release,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_dentry_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_dentry_release,
};