inode.c

#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/pagevec.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
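
/*
 * A worked example of the frag encoding (a sketch based on the helpers
 * in include/linux/ceph/ceph_frag.h; see that header for the
 * authoritative definitions).  A frag packs "bits" into the upper 8
 * bits of a u32 and a "value" into the lower 24, using the value's
 * most significant bits:
 *
 *   ceph_frag_make(0, 0)              = 0x00000000  root; contains all values
 *   ceph_frag_make_child(root, 1, 0)  = 0x01000000  half with top bit clear
 *   ceph_frag_make_child(root, 1, 1)  = 0x01800000  half with top bit set
 *
 * ceph_frag_contains_value(f, v) then simply masks v by the frag's
 * bits and compares the result against the frag's value.
 */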

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);

	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);

		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);
	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}
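
/*
 * Example walk through the loop above (hypothetical tree): if the root
 * frag is recorded with split_by=1 and neither child is split further,
 * then for a dentry name hash v with its top bit set we descend
 *
 *   t = 0x00000000 (root)  ->  t = 0x01800000 (the child containing v)
 *
 * and return 0x01800000 as the leaf.  *found is set only when the leaf
 * has an explicit tree entry, i.e. when it carries delegation info.
 */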

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}


/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_release_count = 0;
	ci->i_symlink = NULL;

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}


/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
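
/*
 * Note on sequence comparison: ceph_seq_cmp() compares as a signed
 * 32-bit difference, so it stays correct across u32 wraparound.  A
 * small example (a sketch, assuming the usual (s32)(a - b) definition
 * in libceph): ceph_seq_cmp(0x00000001, 0xffffffff) > 0, i.e. seq 1
 * counts as "newer" than seq 0xffffffff, where a plain '>' would get
 * this backwards.
 */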
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
				       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
				       CEPH_CAP_FILE_EXCL|
				       CEPH_CAP_FILE_LAZYIO)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}
	return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}
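
/*
 * Concrete example of the time_warp_seq dance: if this client holds
 * CEPH_CAP_FILE_EXCL and the user calls utimes() to set mtime
 * backwards, ceph_setattr() below bumps ci->i_time_warp_seq.  A later
 * MDS reply still carrying the old warp seq then lands in the
 * "we did a utimes(); ignore mds values" branch above instead of
 * clobbering the locally-set times.
 */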

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued, implemented;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&inode->i_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) >= le64_to_cpu(info->version))
		goto no_change;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = le32_to_cpu(info->uid);
		inode->i_gid = le32_to_cpu(info->gid);
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     inode->i_uid, inode->i_gid);
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		inode->i_nlink = le32_to_cpu(info->nlink);

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	/* only update max_size on auth cap */
	if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	    ci->i_max_size != le64_to_cpu(info->max_size)) {
		dout("max_size %lld -> %llu\n", ci->i_max_size,
		     le64_to_cpu(info->max_size));
		ci->i_max_size = le64_to_cpu(info->max_size);
	}

	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			int symlen = iinfo->symlink_len;
			char *sym;

			BUG_ON(symlen != inode->i_size);

			spin_unlock(&inode->i_lock);

			err = -ENOMEM;
			sym = kmalloc(symlen+1, GFP_NOFS);
			if (!sym)
				goto out;
			memcpy(sym, iinfo->symlink, symlen);
			sym[symlen] = 0;

			spin_lock(&inode->i_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);

		/* set dir completion flag? */
		if (ci->i_files == 0 && ci->i_subdirs == 0 &&
		    ceph_snap(inode) == CEPH_NOSNAP &&
		    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
		    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
		    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
			dout(" marking %p complete (empty)\n", inode);
			ci->i_ceph_flags |= CEPH_I_COMPLETE;
			ci->i_max_offset = 2;
		}

		/* it may be better to set st_size in getattr instead? */
		if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
			inode->i_size = ci->i_rbytes;
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

no_change:
	spin_unlock(&inode->i_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

		if (IS_ERR(frag))
			continue;
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags,
				     caps_reservation);
		} else {
			spin_lock(&inode->i_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&inode->i_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
	     dentry, le16_to_cpu(lease->mask), duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (lease->mask == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dn->d_parent->d_inode;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	di = ceph_dentry(dn);

	spin_lock(&inode->i_lock);
	if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
		spin_unlock(&inode->i_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&inode->i_lock);

	spin_lock(&dir->d_lock);
	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dir->d_lock);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
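
/*
 * Reminder on d_materialise_unique() (a hedged summary of the VFS
 * contract as relied on here): it returns NULL when @dn itself was
 * bound to @in, a different pre-existing alias dentry that should be
 * used instead of @dn, or an ERR_PTR on failure.  The three branches
 * below handle those three outcomes.
 */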
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash, bool set_offset)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, dn->d_count,
		     realdn, realdn->d_count,
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	if (set_offset)
		ceph_set_dentry_offset(dn);
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain:
 *   a directory inode along with a dentry,
 *   and/or a target inode.
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int i = 0;
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
				 session, req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (err < 0)
			return err;
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			(le16_to_cpu(rinfo->dlease->mask) &
			 CEPH_LOCK_DN);

		if (!have_lease)
			dout("fill_trace no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up d_subdirs order */
			ceph_i_clear(dir, CEPH_I_COMPLETE);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/* take overwritten dentry's readdir offset */
			dout("dn %p gets %p offset %lld (old offset %lld)\n",
			     req->r_old_dentry, dn, ceph_dentry(dn)->offset,
			     ceph_dentry(req->r_old_dentry)->offset);
			ceph_dentry(req->r_old_dentry)->offset =
				ceph_dentry(dn)->offset;

			dn = req->r_old_dentry; /* use old_dentry */
			in = dn->d_inode;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = dn->d_inode;
		if (!in) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				pr_err("fill_trace bad get_inode "
				       "%llx.%llx\n", vino.ino, vino.snap);
				err = PTR_ERR(in);
				d_delete(dn);
				goto done;
			}
			dn = splice_dentry(dn, in, &have_lease, true);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn; /* may have spliced */
			igrab(in);
		} else if (ceph_ino(in) == vino.ino &&
			   ceph_snap(in) == vino.snap) {
			igrab(in);
		} else {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, in, ceph_ino(in), ceph_snap(in),
			     vino.ino, vino.snap);
			have_lease = false;
			in = NULL;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
		i++;
	} else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		   req->r_op == CEPH_MDS_OP_MKSNAP) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			pr_err("fill_inode get_inode badness %llx.%llx\n",
			       vino.ino, vino.snap);
			err = PTR_ERR(in);
			d_delete(dn);
			goto done;
		}
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		dn = splice_dentry(dn, in, NULL, true);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn; /* may have spliced */
		igrab(in);
		rinfo->head->is_dentry = 1; /* fool notrace handlers */
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		if (in == NULL || ceph_ino(in) != vino.ino ||
		    ceph_snap(in) != vino.snap) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				err = PTR_ERR(in);
				goto done;
			}
		}
		req->r_target_inode = in;

		err = fill_inode(in,
				 &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u64 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_dentry_info *di;

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			err = ceph_init_dentry(dn);
			if (err < 0) {
				dput(dn);
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&parent->d_lock);
			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&parent->d_lock);
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_delete(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
			dn = splice_dentry(dn, in, NULL, false);
			if (IS_ERR(dn))
				dn = NULL;
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			goto next_item;
		}
		if (dn)
			update_dentry_lease(dn, rinfo->dir_dlease[i],
					    req->r_session,
					    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&inode->i_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&inode->i_lock);
	return ret;
}
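
/*
 * Example of the max_size heuristic above: with i_max_size = 4 MB and
 * i_reported_size = 1 MB, growing the file to 2 MB makes
 * (size << 1) >= max_size true while (reported_size << 1) < max_size
 * still holds, so the caller is told (ret == 1) to report the new size
 * to the MDS and request a larger max_size before writes would block.
 */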

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
	}
}

/*
 * invalidate any pages that are not dirty or under writeback.  this
 * includes pages that are clean and mapped.
 */
static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int skip_page =
				(PageDirty(page) || PageWriteback(page));

			if (!skip_page)
				skip_page = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;

			if (skip_page)
				continue;

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	spin_lock(&inode->i_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		spin_unlock(&inode->i_lock);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&inode->i_lock);

	ceph_invalidate_nondirty_pages(inode->i_mapping);

	spin_lock(&inode->i_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
	}
	spin_unlock(&inode->i_lock);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}


/*
 * called by trunc_wq; take i_mutex ourselves
 *
 * We also truncate in a separate thread as well.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	mutex_unlock(&inode->i_mutex);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
	}
}
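
/*
 * Reference-counting note for the three queue_* helpers above: the
 * igrab() taken on successful queueing pins the inode on behalf of the
 * work item, and the matching iput() is done at the end of the
 * corresponding worker (ceph_writeback_work, ceph_invalidate_work,
 * ceph_vmtruncate_work).  If queueing fails, the reference is simply
 * never taken, so nothing is leaked.
 */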
  1308. /*
  1309. * called with i_mutex held.
  1310. *
  1311. * Make sure any pending truncation is applied before doing anything
  1312. * that may depend on it.
  1313. */
  1314. void __ceph_do_pending_vmtruncate(struct inode *inode)
  1315. {
  1316. struct ceph_inode_info *ci = ceph_inode(inode);
  1317. u64 to;
  1318. int wrbuffer_refs, wake = 0;
  1319. retry:
  1320. spin_lock(&inode->i_lock);
  1321. if (ci->i_truncate_pending == 0) {
  1322. dout("__do_pending_vmtruncate %p none pending\n", inode);
  1323. spin_unlock(&inode->i_lock);
  1324. return;
  1325. }
  1326. /*
  1327. * make sure any dirty snapped pages are flushed before we
  1328. * possibly truncate them.. so write AND block!
  1329. */
  1330. if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
  1331. dout("__do_pending_vmtruncate %p flushing snaps first\n",
  1332. inode);
  1333. spin_unlock(&inode->i_lock);
  1334. filemap_write_and_wait_range(&inode->i_data, 0,
  1335. inode->i_sb->s_maxbytes);
  1336. goto retry;
  1337. }
  1338. to = ci->i_truncate_size;
  1339. wrbuffer_refs = ci->i_wrbuffer_ref;
  1340. dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
  1341. ci->i_truncate_pending, to);
  1342. spin_unlock(&inode->i_lock);
  1343. truncate_inode_pages(inode->i_mapping, to);
  1344. spin_lock(&inode->i_lock);
  1345. ci->i_truncate_pending--;
  1346. if (ci->i_truncate_pending == 0)
  1347. wake = 1;
  1348. spin_unlock(&inode->i_lock);
  1349. if (wrbuffer_refs == 0)
  1350. ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
  1351. if (wake)
  1352. wake_up_all(&ci->i_cap_wq);
  1353. }
  1354. /*
  1355. * symlinks
  1356. */
  1357. static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
  1358. {
  1359. struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
  1360. nd_set_link(nd, ci->i_symlink);
  1361. return NULL;
  1362. }
  1363. static const struct inode_operations ceph_symlink_iops = {
  1364. .readlink = generic_readlink,
  1365. .follow_link = ceph_sym_follow_link,
  1366. };
  1367. /*
  1368. * setattr
  1369. */
  1370. int ceph_setattr(struct dentry *dentry, struct iattr *attr)
  1371. {
  1372. struct inode *inode = dentry->d_inode;
  1373. struct ceph_inode_info *ci = ceph_inode(inode);
  1374. struct inode *parent_inode = dentry->d_parent->d_inode;
  1375. const unsigned int ia_valid = attr->ia_valid;
  1376. struct ceph_mds_request *req;
  1377. struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
  1378. int issued;
  1379. int release = 0, dirtied = 0;
  1380. int mask = 0;
  1381. int err = 0;
  1382. if (ceph_snap(inode) != CEPH_NOSNAP)
  1383. return -EROFS;
  1384. __ceph_do_pending_vmtruncate(inode);
  1385. err = inode_change_ok(inode, attr);
  1386. if (err != 0)
  1387. return err;
  1388. req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
  1389. USE_AUTH_MDS);
  1390. if (IS_ERR(req))
  1391. return PTR_ERR(req);
  1392. spin_lock(&inode->i_lock);
  1393. issued = __ceph_caps_issued(ci, NULL);
  1394. dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     inode->i_uid, attr->ia_uid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_uid != inode->i_uid) {
			req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     inode->i_gid, attr->ia_gid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_gid != inode->i_gid) {
			req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			/* arbitrary change allowed; note it in the warp seq */
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			/* with only FILE_WR, time may only move forward */
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			/* only an expanding truncate can be done locally;
			 * a shrink must go through the MDS */
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
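			/* i_blocks counts 512-byte sectors, rounded up:
			 * e.g. ia_size == 1000 -> (1000 + 511) >> 9 == 2 */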
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if the kernel wants to dirty ctime but nothing
			 * else, we need to choose a cap to dirty under, or
			 * do an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		__ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;	/* only release caps we actually hold */
	spin_unlock(&inode->i_lock);
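
	/*
	 * An MDS request is needed only for changes that could not be
	 * applied locally under an exclusive cap; locally dirtied caps
	 * are written back later through the normal cap flush path.
	 */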
	if (mask) {
		req->r_inode = igrab(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);
	ceph_mdsc_put_request(req);
	__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&inode->i_lock);
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
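/*
 * The mask is a cap bitmask (e.g. CEPH_CAP_AUTH_SHARED from
 * ceph_permission(), or CEPH_STAT_CAP_INODE_ALL from ceph_getattr()
 * below); if our issued caps already cover it, the cached attributes
 * are trusted and no MDS request is sent.
 */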
int ceph_do_getattr(struct inode *inode, int mask)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		/* the snapdir is synthesized locally; nothing to fetch */
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = igrab(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}

/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask, unsigned int flags)
{
	int err;
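
	/*
	 * RCU-walk mode must not block, but refreshing the AUTH cap may
	 * require a round trip to the MDS, so ask the VFS to retry in
	 * ref-walk mode instead.
	 */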
	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
	if (!err)
		err = generic_permission(inode, mask, flags, NULL);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = inode->i_ino;
		/* expose the snap id via st_dev so snapshots are
		 * distinguishable from the head inode */
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
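
		/*
		 * Directories report Ceph's recursive accounting:
		 * i_rbytes is the summed size of everything beneath
		 * this directory, so use it as the directory size.
		 */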
		if (S_ISDIR(inode->i_mode)) {
			stat->size = ci->i_rbytes;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}