dir.c

#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
        struct ceph_dentry_info *di;

        if (dentry->d_fsdata)
                return 0;

        di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
        if (!di)
                return -ENOMEM;          /* oh well */

        spin_lock(&dentry->d_lock);
        if (dentry->d_fsdata) {
                /* lost a race */
                kmem_cache_free(ceph_dentry_cachep, di);
                goto out_unlock;
        }

        if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
                d_set_d_op(dentry, &ceph_dentry_ops);
        else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
                d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
        else
                d_set_d_op(dentry, &ceph_snap_dentry_ops);

        di->dentry = dentry;
        di->lease_session = NULL;
        dentry->d_time = jiffies;
        /* avoid reordering d_fsdata setup so that the check above is safe */
        smp_mb();
        dentry->d_fsdata = di;
        ceph_dentry_lru_add(dentry);
out_unlock:
        spin_unlock(&dentry->d_lock);
        return 0;
}

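/*
 * Return the parent inode of a dentry with a reference held (to be
 * dropped by the caller with iput()), or NULL if the dentry is a root
 * (or NULL).
 */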
struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
        struct inode *inode = NULL;

        if (!dentry)
                return NULL;

        spin_lock(&dentry->d_lock);
        if (!IS_ROOT(dentry)) {
                inode = dentry->d_parent->d_inode;
                ihold(inode);
        }
        spin_unlock(&dentry->d_lock);
        return inode;
}

/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
        return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
        return p & 0xffffffff;
}

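/* the helpers above decode what ceph_make_fpos() packs: ((loff_t)frag << 32) | off */
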
/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
                            void *dirent, filldir_t filldir)
{
        struct ceph_file_info *fi = filp->private_data;
        struct dentry *parent = filp->f_dentry;
        struct inode *dir = parent->d_inode;
        struct list_head *p;
        struct dentry *dentry, *last;
        struct ceph_dentry_info *di;
        int err = 0;

        /* claim ref on last dentry we returned */
        last = fi->dentry;
        fi->dentry = NULL;

        dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
             last);

        spin_lock(&parent->d_lock);

        /* start at beginning? */
        if (filp->f_pos == 2 || last == NULL ||
            filp->f_pos < ceph_dentry(last)->offset) {
                if (list_empty(&parent->d_subdirs))
                        goto out_unlock;
                p = parent->d_subdirs.prev;
                dout(" initial p %p/%p\n", p->prev, p->next);
        } else {
                p = last->d_u.d_child.prev;
        }

more:
        dentry = list_entry(p, struct dentry, d_u.d_child);
        di = ceph_dentry(dentry);
        while (1) {
                dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
                     d_unhashed(dentry) ? "!hashed" : "hashed",
                     parent->d_subdirs.prev, parent->d_subdirs.next);
                if (p == &parent->d_subdirs) {
                        fi->flags |= CEPH_F_ATEND;
                        goto out_unlock;
                }
                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
                if (!d_unhashed(dentry) && dentry->d_inode &&
                    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
                    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
                    filp->f_pos <= di->offset)
                        break;
                dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, di->offset,
                     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
                     !dentry->d_inode ? " null" : "");
                spin_unlock(&dentry->d_lock);
                p = p->prev;
                dentry = list_entry(p, struct dentry, d_u.d_child);
                di = ceph_dentry(dentry);
        }

        dget_dlock(dentry);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);

        dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
             dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
        filp->f_pos = di->offset;
        err = filldir(dirent, dentry->d_name.name,
                      dentry->d_name.len, di->offset,
                      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
                      dentry->d_inode->i_mode >> 12);

        if (last) {
                if (err < 0) {
                        /* remember our position */
                        fi->dentry = last;
                        fi->next_offset = di->offset;
                } else {
                        dput(last);
                }
        }
        last = dentry;

        if (err < 0)
                goto out;

        filp->f_pos++;

        /* make sure a dentry wasn't dropped while we didn't have parent lock */
        if (!ceph_dir_is_complete(dir)) {
                dout(" lost dir complete on %p; falling back to mds\n", dir);
                err = -EAGAIN;
                goto out;
        }

        spin_lock(&parent->d_lock);
        p = p->prev;    /* advance to next dentry */
        goto more;

out_unlock:
        spin_unlock(&parent->d_lock);
out:
        if (last)
                dput(last);
        return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
                            int len)
{
        kfree(fi->last_name);
        fi->last_name = kmalloc(len+1, GFP_NOFS);
        if (!fi->last_name)
                return -ENOMEM;
        memcpy(fi->last_name, name, len);
        fi->last_name[len] = 0;
        dout("note_last_dentry '%s'\n", fi->last_name);
        return 0;
}

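/*
 * Main readdir entry point: emit "." and "..", try the cheap dcache
 * path when the directory is known to be complete, and otherwise
 * fetch the directory contents frag by frag from the MDS.
 */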
static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
        struct ceph_file_info *fi = filp->private_data;
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        unsigned frag = fpos_frag(filp->f_pos);
        int off = fpos_off(filp->f_pos);
        int err;
        u32 ftype;
        struct ceph_mds_reply_info_parsed *rinfo;
        const int max_entries = fsc->mount_options->max_readdir;
        const int max_bytes = fsc->mount_options->max_readdir_bytes;

        dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
        if (fi->flags & CEPH_F_ATEND)
                return 0;

        /* always start with . and .. */
        if (filp->f_pos == 0) {
                /* note dir version at start of readdir so we can tell
                 * if any dentries get dropped */
                fi->dir_release_count = atomic_read(&ci->i_release_count);

                dout("readdir off 0 -> '.'\n");
                if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
                            ceph_translate_ino(inode->i_sb, inode->i_ino),
                            inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 1;
                off = 1;
        }
        if (filp->f_pos == 1) {
                ino_t ino = parent_ino(filp->f_dentry);
                dout("readdir off 1 -> '..'\n");
                if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
                            ceph_translate_ino(inode->i_sb, ino),
                            inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 2;
                off = 2;
        }

        /* can we use the dcache? */
        spin_lock(&ci->i_ceph_lock);
        if ((filp->f_pos == 2 || fi->dentry) &&
            !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            __ceph_dir_is_complete(ci) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
                spin_unlock(&ci->i_ceph_lock);
                err = __dcache_readdir(filp, dirent, filldir);
                if (err != -EAGAIN)
                        return err;
        } else {
                spin_unlock(&ci->i_ceph_lock);
        }
        if (fi->dentry) {
                err = note_last_dentry(fi, fi->dentry->d_name.name,
                                       fi->dentry->d_name.len);
                if (err)
                        return err;
                dput(fi->dentry);
                fi->dentry = NULL;
        }

        /* proceed with a normal readdir */

more:
        /* do we have the correct frag content buffered? */
        if (fi->frag != frag || fi->last_readdir == NULL) {
                struct ceph_mds_request *req;
                int op = ceph_snap(inode) == CEPH_SNAPDIR ?
                        CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

                /* discard old result, if any */
                if (fi->last_readdir) {
                        ceph_mdsc_put_request(fi->last_readdir);
                        fi->last_readdir = NULL;
                }

                /* requery frag tree, as the frag topology may have changed */
                frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

                dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
                     ceph_vinop(inode), frag, fi->last_name);
                req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
                if (IS_ERR(req))
                        return PTR_ERR(req);
                req->r_inode = inode;
                ihold(inode);
                req->r_dentry = dget(filp->f_dentry);
                /* hints to request -> mds selection code */
                req->r_direct_mode = USE_AUTH_MDS;
                req->r_direct_hash = ceph_frag_value(frag);
                req->r_direct_is_hash = true;
                req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
                req->r_readdir_offset = fi->next_offset;
                req->r_args.readdir.frag = cpu_to_le32(frag);
                req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
                req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
                req->r_num_caps = max_entries + 1;
                err = ceph_mdsc_do_request(mdsc, NULL, req);
                if (err < 0) {
                        ceph_mdsc_put_request(req);
                        return err;
                }
                dout("readdir got and parsed readdir result=%d"
                     " on frag %x, end=%d, complete=%d\n", err, frag,
                     (int)req->r_reply_info.dir_end,
                     (int)req->r_reply_info.dir_complete);

                if (!req->r_did_prepopulate) {
                        dout("readdir !did_prepopulate");
                        /* preclude from marking dir complete */
                        fi->dir_release_count--;
                }

                /* note next offset and last dentry name */
                fi->offset = fi->next_offset;
                fi->last_readdir = req;

                if (req->r_reply_info.dir_end) {
                        kfree(fi->last_name);
                        fi->last_name = NULL;
                        if (ceph_frag_is_rightmost(frag))
                                fi->next_offset = 2;
                        else
                                fi->next_offset = 0;
                } else {
                        rinfo = &req->r_reply_info;
                        err = note_last_dentry(fi,
                                       rinfo->dir_dname[rinfo->dir_nr-1],
                                       rinfo->dir_dname_len[rinfo->dir_nr-1]);
                        if (err)
                                return err;
                        fi->next_offset += rinfo->dir_nr;
                }
        }

        rinfo = &fi->last_readdir->r_reply_info;
        dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
             rinfo->dir_nr, off, fi->offset);
        while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
                u64 pos = ceph_make_fpos(frag, off);
                struct ceph_mds_reply_inode *in =
                        rinfo->dir_in[off - fi->offset].in;
                struct ceph_vino vino;
                ino_t ino;

                dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
                     off, off - fi->offset, rinfo->dir_nr, pos,
                     rinfo->dir_dname_len[off - fi->offset],
                     rinfo->dir_dname[off - fi->offset], in);
                BUG_ON(!in);
                ftype = le32_to_cpu(in->mode) >> 12;
                vino.ino = le64_to_cpu(in->ino);
                vino.snap = le64_to_cpu(in->snapid);
                ino = ceph_vino_to_ino(vino);
                if (filldir(dirent,
                            rinfo->dir_dname[off - fi->offset],
                            rinfo->dir_dname_len[off - fi->offset],
                            pos,
                            ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
                        dout("filldir stopping us...\n");
                        return 0;
                }
                off++;
                filp->f_pos = pos + 1;
        }

        if (fi->last_name) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
                goto more;
        }

        /* more frags? */
        if (!ceph_frag_is_rightmost(frag)) {
                frag = ceph_frag_next(frag);
                off = 0;
                filp->f_pos = ceph_make_fpos(frag, off);
                dout("readdir next frag is %x\n", frag);
                goto more;
        }
        fi->flags |= CEPH_F_ATEND;

        /*
         * if dir_release_count still matches the dir, no dentries
         * were released during the whole readdir, and we should have
         * the complete dir contents in our cache.
         */
        spin_lock(&ci->i_ceph_lock);
        if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
                dout(" marking %p complete\n", inode);
                __ceph_dir_set_complete(ci, fi->dir_release_count);
                ci->i_max_offset = filp->f_pos;
        }
        spin_unlock(&ci->i_ceph_lock);

        dout("readdir %p filp %p done.\n", inode, filp);
        return 0;
}

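/*
 * Discard buffered readdir state (cached MDS reply, last dentry name,
 * next offset) so the next readdir starts over from the beginning.
 */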
static void reset_readdir(struct ceph_file_info *fi)
{
        if (fi->last_readdir) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
        }
        kfree(fi->last_name);
        fi->last_name = NULL;
        fi->next_offset = 2;  /* compensate for . and .. */
        if (fi->dentry) {
                dput(fi->dentry);
                fi->dentry = NULL;
        }
        fi->flags &= ~CEPH_F_ATEND;
}

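/*
 * llseek on a directory.  Positions encode (frag, offset), so seeking
 * to 0, into a different frag, or backwards into an earlier chunk
 * drops any buffered readdir content.
 */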
static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_mapping->host;
        loff_t old_offset = offset;
        loff_t retval;

        mutex_lock(&inode->i_mutex);
        retval = -EINVAL;
        switch (whence) {
        case SEEK_END:
                offset += inode->i_size + 2;   /* FIXME */
                break;
        case SEEK_CUR:
                offset += file->f_pos;
        case SEEK_SET:
                break;
        default:
                goto out;
        }

        if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                        file->f_version = 0;
                        fi->flags &= ~CEPH_F_ATEND;
                }
                retval = offset;

                /*
                 * discard buffered readdir content on seekdir(0), or
                 * seek to new frag, or seek prior to current chunk.
                 */
                if (offset == 0 ||
                    fpos_frag(offset) != fpos_frag(old_offset) ||
                    fpos_off(offset) < fi->offset) {
                        dout("dir_llseek dropping %p content\n", file);
                        reset_readdir(fi);
                }

                /* bump dir_release_count if we did a forward seek */
                if (offset > old_offset)
                        fi->dir_release_count--;
        }
out:
        mutex_unlock(&inode->i_mutex);
        return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
                        struct dentry *dentry, int err)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
        struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

        /* .snap dir? */
        if (err == -ENOENT &&
            ceph_snap(parent) == CEPH_NOSNAP &&
            strcmp(dentry->d_name.name,
                   fsc->mount_options->snapdir_name) == 0) {
                struct inode *inode = ceph_get_snapdir(parent);
                dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
                     dentry, dentry->d_name.len, dentry->d_name.name, inode);
                BUG_ON(!d_unhashed(dentry));
                d_add(dentry, inode);
                err = 0;
        }
        return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
                                  struct dentry *dentry, int err)
{
        if (err == -ENOENT) {
                /* no trace? */
                err = 0;
                if (!req->r_reply_info.head->is_dentry) {
                        dout("ENOENT and no trace, dentry %p inode %p\n",
                             dentry, dentry->d_inode);
                        if (dentry->d_inode) {
                                d_drop(dentry);
                                err = -ENOENT;
                        } else {
                                d_add(dentry, NULL);
                        }
                }
        }
        if (err)
                dentry = ERR_PTR(err);
        else if (dentry != req->r_dentry)
                dentry = dget(req->r_dentry);   /* we got spliced */
        else
                dentry = NULL;
        return dentry;
}

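/*
 * Is this the ".ceph" entry in the root directory?  ceph_lookup()
 * never concludes ENOENT for it locally.
 */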
static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
        return ceph_ino(inode) == CEPH_INO_ROOT &&
                strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                                  unsigned int flags)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int op;
        int err;

        dout("lookup %p dentry %p '%.*s'\n",
             dir, dentry, dentry->d_name.len, dentry->d_name.name);

        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);

        err = ceph_init_dentry(dentry);
        if (err < 0)
                return ERR_PTR(err);

        /* can we conclude ENOENT locally? */
        if (dentry->d_inode == NULL) {
                struct ceph_inode_info *ci = ceph_inode(dir);
                struct ceph_dentry_info *di = ceph_dentry(dentry);

                spin_lock(&ci->i_ceph_lock);
                dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
                            fsc->mount_options->snapdir_name,
                            dentry->d_name.len) &&
                    !is_root_ceph_dentry(dir, dentry) &&
                    __ceph_dir_is_complete(ci) &&
                    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
                        spin_unlock(&ci->i_ceph_lock);
                        dout(" dir %p complete, -ENOENT\n", dir);
                        d_add(dentry, NULL);
                        di->lease_shared_gen = ci->i_shared_gen;
                        return NULL;
                }
                spin_unlock(&ci->i_ceph_lock);
        }

        op = ceph_snap(dir) == CEPH_SNAPDIR ?
                CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
        req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        /* we only need inode linkage */
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
        req->r_locked_dir = dir;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        err = ceph_handle_snapdir(req, dentry, err);
        dentry = ceph_finish_lookup(req, dentry, err);
        ceph_mdsc_put_request(req);  /* will dput(dentry) */
        dout("lookup result=%p\n", dentry);
        return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
        struct dentry *result = ceph_lookup(dir, dentry, 0);

        if (result && !IS_ERR(result)) {
                /*
                 * We created the item, then did a lookup, and found
                 * it was already linked to another inode we already
                 * had in our cache (and thus got spliced).  Link our
                 * dentry to that inode, but don't hash it, just in
                 * case the VFS wants to dereference it.
                 */
                BUG_ON(!result->d_inode);
                d_instantiate(dentry, result->d_inode);
                return 0;
        }
        return PTR_ERR(result);
}

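/*
 * mknod: ask the MDS to create the node; if the reply carries no
 * trace, do a follow-up lookup to link up the dentry.
 */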
static int ceph_mknod(struct inode *dir, struct dentry *dentry,
                      umode_t mode, dev_t rdev)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
             dir, dentry, mode, rdev);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mknod.mode = cpu_to_le32(mode);
        req->r_args.mknod.rdev = cpu_to_le32(rdev);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

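/* regular file create is just mknod with no device */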
static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
                       bool excl)
{
        return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
                        const char *dest)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_path2 = kstrdup(dest, GFP_NOFS);
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* mkdir .snap/foo is a MKSNAP */
                op = CEPH_MDS_OP_MKSNAP;
                dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
                     dentry->d_name.len, dentry->d_name.name, dentry);
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
                op = CEPH_MDS_OP_MKDIR;
        } else {
                goto out;
        }
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mkdir.mode = cpu_to_le32(mode);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
out:
        if (err < 0)
                d_drop(dentry);
        return err;
}

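/*
 * Hard link: ask the MDS to link old_dentry's inode into dir; if the
 * reply carries no trace, instantiate the new dentry ourselves.
 */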
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
                     struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("link in dir %p old_dentry %p dentry %p\n", dir,
             old_dentry, dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
        req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (err) {
                d_drop(dentry);
        } else if (!req->r_reply_info.head->is_dentry) {
                ihold(old_dentry->d_inode);
                d_instantiate(dentry, old_dentry->d_inode);
        }
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

        spin_lock(&ci->i_ceph_lock);
        if (inode->i_nlink == 1) {
                drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
                ci->i_ceph_flags |= CEPH_I_NODELAY;
        }
        spin_unlock(&ci->i_ceph_lock);
        return drop;
}

/*
 * rmdir and unlink differ only in the metadata op code.
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = dentry->d_inode;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* rmdir .snap/foo is RMSNAP */
                dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
                     dentry->d_name.name, dentry);
                op = CEPH_MDS_OP_RMSNAP;
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("unlink/rmdir dir %p dn %p inode %p\n",
                     dir, dentry, inode);
                op = S_ISDIR(dentry->d_inode->i_mode) ?
                        CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
        } else
                goto out;
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_inode_drop = drop_caps_for_unlink(inode);
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                d_delete(dentry);
        ceph_mdsc_put_request(req);
out:
        return err;
}

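/*
 * Rename.  Cross-snapshot renames get -EXDEV, and anything outside
 * the live (NOSNAP) namespace is read-only (-EROFS).
 */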
static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
                       struct inode *new_dir, struct dentry *new_dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(old_dir) != ceph_snap(new_dir))
                return -EXDEV;
        if (ceph_snap(old_dir) != CEPH_NOSNAP ||
            ceph_snap(new_dir) != CEPH_NOSNAP)
                return -EROFS;
        dout("rename dir %p dentry %p to dir %p dentry %p\n",
             old_dir, old_dentry, new_dir, new_dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_dentry = dget(new_dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry);
        req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
        req->r_locked_dir = new_dir;
        req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        /* release LINK_RDCACHE on source inode (mds will lock it) */
        req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
        if (new_dentry->d_inode)
                req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
        err = ceph_mdsc_do_request(mdsc, old_dir, req);
        if (!err && !req->r_reply_info.head->is_dentry) {
                /*
                 * Normally d_move() is done by fill_trace (called by
                 * do_request, above).  If there is no trace, we need
                 * to do it here.
                 */

                /* d_move screws up d_subdirs order */
                ceph_dir_clear_complete(new_dir);

                d_move(old_dentry, new_dentry);

                /* ensure target dentry is invalidated, despite
                   rehashing bug in vfs_rename_dir */
                ceph_invalidate_dentry_lease(new_dentry);
        }
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
        spin_lock(&dentry->d_lock);
        dentry->d_time = jiffies;
        ceph_dentry(dentry)->lease_shared_gen = 0;
        spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
        struct ceph_dentry_info *di;
        struct ceph_mds_session *s;
        int valid = 0;
        u32 gen;
        unsigned long ttl;
        struct ceph_mds_session *session = NULL;
        struct inode *dir = NULL;
        u32 seq = 0;

        spin_lock(&dentry->d_lock);
        di = ceph_dentry(dentry);
        if (di->lease_session) {
                s = di->lease_session;
                spin_lock(&s->s_gen_ttl_lock);
                gen = s->s_cap_gen;
                ttl = s->s_cap_ttl;
                spin_unlock(&s->s_gen_ttl_lock);

                if (di->lease_gen == gen &&
                    time_before(jiffies, dentry->d_time) &&
                    time_before(jiffies, ttl)) {
                        valid = 1;
                        if (di->lease_renew_after &&
                            time_after(jiffies, di->lease_renew_after)) {
                                /* we should renew */
                                dir = dentry->d_parent->d_inode;
                                session = ceph_get_mds_session(s);
                                seq = di->lease_seq;
                                di->lease_renew_after = 0;
                                di->lease_renew_from = jiffies;
                        }
                }
        }
        spin_unlock(&dentry->d_lock);

        if (session) {
                ceph_mdsc_lease_send_msg(session, dir, dentry,
                                         CEPH_MDS_LEASE_RENEW, seq);
                ceph_put_mds_session(session);
        }
        dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
        return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int valid = 0;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_shared_gen == di->lease_shared_gen)
                valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
        spin_unlock(&ci->i_ceph_lock);
        dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
             dir, (unsigned)ci->i_shared_gen, dentry,
             (unsigned)di->lease_shared_gen, valid);
        return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
        int valid = 0;
        struct inode *dir;

        if (flags & LOOKUP_RCU)
                return -ECHILD;

        dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
             dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
             ceph_dentry(dentry)->offset);

        dir = ceph_get_dentry_parent_inode(dentry);

        /* always trust cached snapped dentries, snapdir dentry */
        if (ceph_snap(dir) != CEPH_NOSNAP) {
                dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
                valid = 1;
        } else if (dentry->d_inode &&
                   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
                valid = 1;
        } else if (dentry_lease_is_valid(dentry) ||
                   dir_lease_is_valid(dir, dentry)) {
                valid = 1;
        }

        dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
        if (valid)
                ceph_dentry_lru_touch(dentry);
        else
                d_drop(dentry);
        iput(dir);
        return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);

        dout("d_release %p\n", dentry);
        ceph_dentry_lru_del(dentry);
        if (di->lease_session)
                ceph_put_mds_session(di->lease_session);
        kmem_cache_free(ceph_dentry_cachep, di);
        dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
                                     unsigned int flags)
{
        /*
         * Eventually, we'll want to revalidate snapped metadata
         * too... probably...
         */
        return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
        dout("ceph_d_prune %p\n", dentry);

        /* do we have a valid parent? */
        if (IS_ROOT(dentry))
                return;

        /* if we are not hashed, we don't affect dir's completeness */
        if (d_unhashed(dentry))
                return;

        /*
         * we hold d_lock, so d_parent is stable, and d_fsdata is never
         * cleared until d_release
         */
        ceph_dir_clear_complete(dentry->d_parent->d_inode);
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
                             loff_t *ppos)
{
        struct ceph_file_info *cf = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int left;
        const int bufsize = 1024;

        if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
                return -EISDIR;

        if (!cf->dir_info) {
                cf->dir_info = kmalloc(bufsize, GFP_NOFS);
                if (!cf->dir_info)
                        return -ENOMEM;
                cf->dir_info_len =
                        snprintf(cf->dir_info, bufsize,
                                 "entries: %20lld\n"
                                 " files: %20lld\n"
                                 " subdirs: %20lld\n"
                                 "rentries: %20lld\n"
                                 " rfiles: %20lld\n"
                                 " rsubdirs: %20lld\n"
                                 "rbytes: %20lld\n"
                                 "rctime: %10ld.%09ld\n",
                                 ci->i_files + ci->i_subdirs,
                                 ci->i_files,
                                 ci->i_subdirs,
                                 ci->i_rfiles + ci->i_rsubdirs,
                                 ci->i_rfiles,
                                 ci->i_rsubdirs,
                                 ci->i_rbytes,
                                 (long)ci->i_rctime.tv_sec,
                                 (long)ci->i_rctime.tv_nsec);
        }

        if (*ppos >= cf->dir_info_len)
                return 0;
        size = min_t(unsigned, size, cf->dir_info_len-*ppos);
        left = copy_to_user(buf, cf->dir_info + *ppos, size);
        if (left == size)
                return -EFAULT;
        *ppos += (size - left);
        return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
                          int datasync)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct list_head *head = &ci->i_unsafe_dirops;
        struct ceph_mds_request *req;
        u64 last_tid;
        int ret = 0;

        dout("dir_fsync %p\n", inode);
        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (ret)
                return ret;
        mutex_lock(&inode->i_mutex);

        spin_lock(&ci->i_unsafe_lock);
        if (list_empty(head))
                goto out;

        req = list_entry(head->prev,
                         struct ceph_mds_request, r_unsafe_dir_item);
        last_tid = req->r_tid;

        do {
                ceph_mdsc_get_request(req);
                spin_unlock(&ci->i_unsafe_lock);

                dout("dir_fsync %p wait on tid %llu (until %llu)\n",
                     inode, req->r_tid, last_tid);
                if (req->r_timeout) {
                        ret = wait_for_completion_timeout(
                                &req->r_safe_completion, req->r_timeout);
                        if (ret > 0)
                                ret = 0;
                        else if (ret == 0)
                                ret = -EIO;  /* timed out */
                } else {
                        wait_for_completion(&req->r_safe_completion);
                }
                ceph_mdsc_put_request(req);

                spin_lock(&ci->i_unsafe_lock);
                if (ret || list_empty(head))
                        break;
                req = list_entry(head->next,
                                 struct ceph_mds_request, r_unsafe_dir_item);
        } while (req->r_tid < last_tid);
out:
        spin_unlock(&ci->i_unsafe_lock);
        mutex_unlock(&inode->i_mutex);

        return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
             dn->d_name.len, dn->d_name.name);
        mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
        spin_lock(&mdsc->dentry_lru_lock);
        list_add_tail(&di->lru, &mdsc->dentry_lru);
        mdsc->num_dentry++;
        spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
             dn->d_name.len, dn->d_name.name, di->offset);
        mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
        spin_lock(&mdsc->dentry_lru_lock);
        list_move_tail(&di->lru, &mdsc->dentry_lru);
        spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
             dn->d_name.len, dn->d_name.name);
        mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
        spin_lock(&mdsc->dentry_lru_lock);
        list_del_init(&di->lru);
        mdsc->num_dentry--;
        spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
        struct ceph_inode_info *dci = ceph_inode(dir);

        switch (dci->i_dir_layout.dl_dir_hash) {
        case 0: /* for backward compat */
        case CEPH_STR_HASH_LINUX:
                return dn->d_name.hash;

        default:
                return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
                                     dn->d_name.name, dn->d_name.len);
        }
}

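/*
 * Operations tables.  Normal dentries, the hidden .snap dir, and
 * snapped dentries each get their own dentry_operations.
 */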
const struct file_operations ceph_dir_fops = {
        .read = ceph_read_dir,
        .readdir = ceph_readdir,
        .llseek = ceph_dir_llseek,
        .open = ceph_open,
        .release = ceph_release,
        .unlocked_ioctl = ceph_ioctl,
        .fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
        .lookup = ceph_lookup,
        .permission = ceph_permission,
        .getattr = ceph_getattr,
        .setattr = ceph_setattr,
        .setxattr = ceph_setxattr,
        .getxattr = ceph_getxattr,
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
        .mknod = ceph_mknod,
        .symlink = ceph_symlink,
        .mkdir = ceph_mkdir,
        .link = ceph_link,
        .unlink = ceph_unlink,
        .rmdir = ceph_unlink,
        .rename = ceph_rename,
        .create = ceph_create,
        .atomic_open = ceph_atomic_open,
};

const struct dentry_operations ceph_dentry_ops = {
        .d_revalidate = ceph_d_revalidate,
        .d_release = ceph_d_release,
        .d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
        .d_revalidate = ceph_snapdir_d_revalidate,
        .d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
        .d_release = ceph_d_release,
        .d_prune = ceph_d_prune,
};