#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/pagevec.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}
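
/*
 * Illustrative sketch (editorial note, mirroring the callers below in
 * this file): a typical caller builds a ceph_vino from an MDS reply
 * and must check for an error pointer, e.g.:
 *
 *	struct ceph_vino vino = {
 *		.ino  = le64_to_cpu(ininfo->ino),
 *		.snap = le64_to_cpu(ininfo->snapid),
 *	};
 *	struct inode *in = ceph_get_inode(sb, vino);
 *
 *	if (IS_ERR(in))
 *		return PTR_ERR(in);
 */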

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};

/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
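
/*
 * For illustration (encoding per include/linux/ceph/ceph_frag.h, not
 * defined here): a frag_t packs a bit count b into its top byte and a
 * b-bit value into the high bits of the remaining 24, so
 * ceph_frag_make(0, 0) is the root fragment covering the whole hash
 * range, and splitting it by 1 yields the children
 * ceph_frag_make(1, 0x000000) and ceph_frag_make(1, 0x800000).
 */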

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);

		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);
	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}
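
/*
 * Illustrative walk (an assumed tree, not from a real trace): if the
 * root is split by 1 and its first child is split by 1 again, a value
 * hashing into the lowest quarter of the range descends
 * root -> first child -> first grandchild, and that grandchild's frag
 * id is returned as the leaf.
 */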

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}
	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_release_count = 0;
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
				       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
				       CEPH_CAP_FILE_EXCL|
				       CEPH_CAP_FILE_LAZYIO)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}
	return queue_trunc;
}
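
/*
 * Worked example of the logic above (assumed numbers): if we cached
 * size 100 at truncate_seq 3 and the MDS now reports size 0 at
 * truncate_seq 4, the newer seq wins over the smaller size, so we
 * adopt size 0, bump i_truncate_pending (provided we hold relevant
 * file caps or the file is open or mmapped), and return 1 so the
 * caller queues an async vmtruncate.  A reply with seq 3 and size 50
 * is ignored, since within a single seq the size only ever grows.
 */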
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued = 0, implemented;
	int updating_inode = 0;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&inode->i_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) >= le64_to_cpu(info->version))
		goto no_change;

	updating_inode = 1;
	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = le32_to_cpu(info->uid);
		inode->i_gid = le32_to_cpu(info->gid);
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     inode->i_uid, inode->i_gid);
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		inode->i_nlink = le32_to_cpu(info->nlink);

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	/* only update max_size on auth cap */
	if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	    ci->i_max_size != le64_to_cpu(info->max_size)) {
		dout("max_size %lld -> %llu\n", ci->i_max_size,
		     le64_to_cpu(info->max_size));
		ci->i_max_size = le64_to_cpu(info->max_size);
	}

	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			int symlen = iinfo->symlink_len;
			char *sym;

			BUG_ON(symlen != inode->i_size);
			spin_unlock(&inode->i_lock);

			err = -ENOMEM;
			sym = kmalloc(symlen+1, GFP_NOFS);
			if (!sym)
				goto out;
			memcpy(sym, iinfo->symlink, symlen);
			sym[symlen] = 0;

			spin_lock(&inode->i_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

no_change:
	spin_unlock(&inode->i_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

		if (IS_ERR(frag))
			continue;
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags,
				     caps_reservation);
		} else {
			spin_lock(&inode->i_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&inode->i_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* set dir completion flag? */
	if (S_ISDIR(inode->i_mode) &&
	    updating_inode &&			/* didn't jump to no_change */
	    ci->i_files == 0 && ci->i_subdirs == 0 &&
	    ceph_snap(inode) == CEPH_NOSNAP &&
	    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
	    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
	    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
		dout(" marking %p complete (empty)\n", inode);
		/* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
		ci->i_max_offset = 2;
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}
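
/*
 * For illustration (assumed values): with HZ=100 and an MDS lease of
 * duration_ms=30000, ttl = from_time + 3000 jiffies (30s) and
 * half_ttl = from_time + 1500 jiffies, so lease renewal starts once
 * half of the lease's lifetime has elapsed.
 */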

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 *
 * Always called under directory's i_mutex.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dir->d_inode;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	di = ceph_dentry(dn);

	spin_lock(&inode->i_lock);
	if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
		spin_unlock(&inode->i_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&inode->i_lock);

	spin_lock(&dir->d_lock);
	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dir->d_lock);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash, bool set_offset)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, dn->d_count,
		     realdn, realdn->d_count,
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	if (set_offset)
		ceph_set_dentry_offset(dn);
out:
	return dn;
}
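
/*
 * For reference (VFS behavior, not specific to ceph):
 * d_materialise_unique() returns NULL when @dn itself was bound to
 * @in, a different dentry when an existing alias for @in was found
 * and swapped in (in which case we drop our ref on @dn), or an
 * ERR_PTR on failure, which is why all three branches above are
 * needed.
 */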

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain:
 *         a directory inode along with a dentry.
 *         and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int i = 0;
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
				 session, req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (err < 0)
			return err;
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/*
			 * d_move() puts the renamed dentry at the end of
			 * d_subdirs.  We need to assign it an appropriate
			 * directory offset so we can behave when holding
			 * I_COMPLETE.
			 */
			ceph_set_dentry_offset(req->r_old_dentry);
			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
			in = dn->d_inode;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = dn->d_inode;
		if (!in) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				pr_err("fill_trace bad get_inode "
				       "%llx.%llx\n", vino.ino, vino.snap);
				err = PTR_ERR(in);
				d_delete(dn);
				goto done;
			}
			dn = splice_dentry(dn, in, &have_lease, true);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
			ihold(in);
		} else if (ceph_ino(in) == vino.ino &&
			   ceph_snap(in) == vino.snap) {
			ihold(in);
		} else {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, in, ceph_ino(in), ceph_snap(in),
			     vino.ino, vino.snap);
			have_lease = false;
			in = NULL;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
		i++;
	} else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		   req->r_op == CEPH_MDS_OP_MKSNAP) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			pr_err("fill_inode get_inode badness %llx.%llx\n",
			       vino.ino, vino.snap);
			err = PTR_ERR(in);
			d_delete(dn);
			goto done;
		}
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		dn = splice_dentry(dn, in, NULL, true);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
		ihold(in);
		rinfo->head->is_dentry = 1;  /* fool notrace handlers */
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		if (in == NULL || ceph_ino(in) != vino.ino ||
		    ceph_snap(in) != vino.snap) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				err = PTR_ERR(in);
				goto done;
			}
		}
		req->r_target_inode = in;

		err = fill_inode(in,
				 &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u64 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_dentry_info *di;

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			err = ceph_init_dentry(dn);
			if (err < 0) {
				dput(dn);
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&parent->d_lock);
			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&parent->d_lock);
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_delete(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
			dn = splice_dentry(dn, in, NULL, false);
			if (IS_ERR(dn))
				dn = NULL;
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			goto next_item;
		}
		if (dn)
			update_dentry_lease(dn, rinfo->dir_dlease[i],
					    req->r_session,
					    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&inode->i_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&inode->i_lock);
	return ret;
}
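
/*
 * Worked example of the check above (assumed numbers): with
 * i_max_size = 4 MB, ret becomes 1 the first time a write pushes the
 * size to 2 MB or beyond while the last size reported to the MDS was
 * still below 2 MB; the left shifts compare each size against
 * max_size/2 without dividing.
 */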

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
		ihold(inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
		ihold(inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
	}
}

/*
 * invalidate any pages that are not dirty or under writeback.  this
 * includes pages that are clean and mapped.
 */
static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int skip_page =
				(PageDirty(page) || PageWriteback(page));

			if (!skip_page)
				skip_page = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;

			if (skip_page)
				continue;

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	spin_lock(&inode->i_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		spin_unlock(&inode->i_lock);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&inode->i_lock);

	ceph_invalidate_nondirty_pages(inode->i_mapping);

	spin_lock(&inode->i_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
	}
	spin_unlock(&inode->i_lock);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}

/*
 * Called by trunc_wq; take i_mutex ourselves.
 *
 * Besides the synchronous callers of __ceph_do_pending_vmtruncate(),
 * we also truncate here, in a separate worker thread.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	mutex_unlock(&inode->i_mutex);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
		ihold(inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
	}
}

/*
 * called with i_mutex held.
 *
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, wake = 0;

retry:
	spin_lock(&inode->i_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&inode->i_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&inode->i_lock);

	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&inode->i_lock);
	ci->i_truncate_pending--;
	if (ci->i_truncate_pending == 0)
		wake = 1;
	spin_unlock(&inode->i_lock);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
	if (wake)
		wake_up_all(&ci->i_cap_wq);
}
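
/*
 * Sketch of the async path (a summary of this file, not new behavior):
 * fill_inode() queues an async vmtruncate when ceph_fill_file_size()
 * reports a size decrease; trunc_wq then runs ceph_vmtruncate_work(),
 * which takes i_mutex and drains the pending truncations via
 * __ceph_do_pending_vmtruncate().
 */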
  1366. /*
  1367. * symlinks
  1368. */
  1369. static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
  1370. {
  1371. struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
  1372. nd_set_link(nd, ci->i_symlink);
  1373. return NULL;
  1374. }
  1375. static const struct inode_operations ceph_symlink_iops = {
  1376. .readlink = generic_readlink,
  1377. .follow_link = ceph_sym_follow_link,
  1378. };
  1379. /*
  1380. * setattr
  1381. */
  1382. int ceph_setattr(struct dentry *dentry, struct iattr *attr)
  1383. {
  1384. struct inode *inode = dentry->d_inode;
  1385. struct ceph_inode_info *ci = ceph_inode(inode);
  1386. struct inode *parent_inode;
  1387. const unsigned int ia_valid = attr->ia_valid;
  1388. struct ceph_mds_request *req;
  1389. struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
  1390. int issued;
  1391. int release = 0, dirtied = 0;
  1392. int mask = 0;
  1393. int err = 0;
  1394. int inode_dirty_flags = 0;
  1395. if (ceph_snap(inode) != CEPH_NOSNAP)
  1396. return -EROFS;
  1397. __ceph_do_pending_vmtruncate(inode);
  1398. err = inode_change_ok(inode, attr);
  1399. if (err != 0)
  1400. return err;
  1401. req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
  1402. USE_AUTH_MDS);
  1403. if (IS_ERR(req))
  1404. return PTR_ERR(req);
  1405. spin_lock(&inode->i_lock);
  1406. issued = __ceph_caps_issued(ci, NULL);
  1407. dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     inode->i_uid, attr->ia_uid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_uid != inode->i_uid) {
			req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     inode->i_gid, attr->ia_gid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_gid != inode->i_gid) {
			req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
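	/*
	 * Timestamps get a three-way policy: FILE_EXCL lets us set the time
	 * to any value locally (bumping i_time_warp_seq so the MDS can
	 * order the change relative to others), FILE_WR only lets us move
	 * the clock forward, and anything else goes to the MDS.
	 */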
	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
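	/*
	 * Size changes: with FILE_EXCL we hold the only write caps, so a
	 * grow can be applied locally.  A shrink (or a grow without EXCL)
	 * must go through the MDS, which coordinates the truncation across
	 * clients; old_size is sent so the MDS can detect races.
	 */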
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if the kernel wants to dirty ctime but nothing
			 * else, we need to choose a cap to dirty under, or
			 * do an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;
	spin_unlock(&inode->i_lock);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);
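	/*
	 * Anything we could not apply locally is sent synchronously to the
	 * auth MDS; r_inode_drop tells the MDS which of our caps are being
	 * released along with the request.
	 */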
	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		parent_inode = ceph_get_dentry_parent_inode(dentry);
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
		iput(parent_inode);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&inode->i_lock);
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int ceph_do_getattr(struct inode *inode, int mask)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;
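	/*
	 * No cached caps cover the requested mask, so fetch fresh metadata.
	 * GETATTR is read-only and may be sent to any MDS, not just the
	 * auth MDS for this inode.
	 */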
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}

/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
	if (!err)
		err = generic_permission(inode, mask);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
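		/*
		 * Directory "size" is synthetic: with the rbytes mount
		 * option it is the recursive byte count of everything
		 * beneath the directory; otherwise it is the number of
		 * direct entries (files + subdirectories).
		 */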
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}