inode.c

#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/pagevec.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
        struct inode *inode;
        ino_t t = ceph_vino_to_ino(vino);

        inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
        if (inode == NULL)
                return ERR_PTR(-ENOMEM);
        if (inode->i_state & I_NEW) {
                dout("get_inode created new inode %p %llx.%llx ino %llx\n",
                     inode, ceph_vinop(inode), (u64)inode->i_ino);
                unlock_new_inode(inode);
        }

        dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
             vino.snap, inode);
        return inode;
}
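
/*
 * Illustrative sketch (not part of the original call paths): a caller
 * that already knows an ino/snap pair could look up the inode like
 * this.  'some_ino' is a hypothetical value; CEPH_NOSNAP selects the
 * head (non-snapshot) version of the inode.
 */
#if 0
        struct ceph_vino vino = {
                .ino = some_ino,        /* hypothetical ino */
                .snap = CEPH_NOSNAP,
        };
        struct inode *inode = ceph_get_inode(sb, vino);

        if (IS_ERR(inode))
                return PTR_ERR(inode);  /* only -ENOMEM can happen here */
        /* ... use inode ... */
        iput(inode);
#endif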

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
        struct ceph_vino vino = {
                .ino = ceph_ino(parent),
                .snap = CEPH_SNAPDIR,
        };
        struct inode *inode = ceph_get_inode(parent->i_sb, vino);
        struct ceph_inode_info *ci = ceph_inode(inode);

        BUG_ON(!S_ISDIR(parent->i_mode));
        if (IS_ERR(inode))
                return inode;
        inode->i_mode = parent->i_mode;
        inode->i_uid = parent->i_uid;
        inode->i_gid = parent->i_gid;
        inode->i_op = &ceph_dir_iops;
        inode->i_fop = &ceph_dir_fops;
        ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
        ci->i_rbytes = 0;
        return inode;
}

const struct inode_operations ceph_file_iops = {
        .permission = ceph_permission,
        .setattr = ceph_setattr,
        .getattr = ceph_getattr,
        .setxattr = ceph_setxattr,
        .getxattr = ceph_getxattr,
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
};

/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
                                                    u32 f)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_frag *frag;
        int c;

        p = &ci->i_fragtree.rb_node;
        while (*p) {
                parent = *p;
                frag = rb_entry(parent, struct ceph_inode_frag, node);
                c = ceph_frag_compare(f, frag->frag);
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
                        p = &(*p)->rb_right;
                else
                        return frag;
        }

        frag = kmalloc(sizeof(*frag), GFP_NOFS);
        if (!frag) {
                pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
                       "frag %x\n", &ci->vfs_inode,
                       ceph_vinop(&ci->vfs_inode), f);
                return ERR_PTR(-ENOMEM);
        }
        frag->frag = f;
        frag->split_by = 0;
        frag->mds = -1;
        frag->ndist = 0;

        rb_link_node(&frag->node, parent, p);
        rb_insert_color(&frag->node, &ci->i_fragtree);

        dout("get_or_create_frag added %llx.%llx frag %x\n",
             ceph_vinop(&ci->vfs_inode), f);
        return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
        struct rb_node *n = ci->i_fragtree.rb_node;

        while (n) {
                struct ceph_inode_frag *frag =
                        rb_entry(n, struct ceph_inode_frag, node);
                int c = ceph_frag_compare(f, frag->frag);

                if (c < 0)
                        n = n->rb_left;
                else if (c > 0)
                        n = n->rb_right;
                else
                        return frag;
        }
        return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
                     struct ceph_inode_frag *pfrag,
                     int *found)
{
        u32 t = ceph_frag_make(0, 0);
        struct ceph_inode_frag *frag;
        unsigned nway, i;
        u32 n;

        if (found)
                *found = 0;

        mutex_lock(&ci->i_fragtree_mutex);
        while (1) {
                WARN_ON(!ceph_frag_contains_value(t, v));
                frag = __ceph_find_frag(ci, t);
                if (!frag)
                        break; /* t is a leaf */
                if (frag->split_by == 0) {
                        if (pfrag)
                                memcpy(pfrag, frag, sizeof(*pfrag));
                        if (found)
                                *found = 1;
                        break;
                }

                /* choose child */
                nway = 1 << frag->split_by;
                dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
                     frag->split_by, nway);
                for (i = 0; i < nway; i++) {
                        n = ceph_frag_make_child(t, frag->split_by, i);
                        if (ceph_frag_contains_value(n, v)) {
                                t = n;
                                break;
                        }
                }
                BUG_ON(i == nway);
        }
        dout("choose_frag(%x) = %x\n", v, t);
        mutex_unlock(&ci->i_fragtree_mutex);
        return t;
}
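
/*
 * Illustrative sketch: a caller that needs to know which dirfrag (and
 * which MDS, if delegated) covers a given name hash might use
 * ceph_choose_frag() like this.  'hash' stands in for the dentry name
 * hash a real caller would supply.
 */
#if 0
        struct ceph_inode_frag fraginfo;
        int found;
        u32 fg = ceph_choose_frag(ci, hash, &fraginfo, &found);

        if (found && fraginfo.mds >= 0)
                ; /* frag fg is delegated; send the request to fraginfo.mds */
#endif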

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
                             struct ceph_mds_reply_dirfrag *dirinfo)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag;
        u32 id = le32_to_cpu(dirinfo->frag);
        int mds = le32_to_cpu(dirinfo->auth);
        int ndist = le32_to_cpu(dirinfo->ndist);
        int i;
        int err = 0;

        mutex_lock(&ci->i_fragtree_mutex);
        if (ndist == 0) {
                /* no delegation info needed. */
                frag = __ceph_find_frag(ci, id);
                if (!frag)
                        goto out;
                if (frag->split_by == 0) {
                        /* tree leaf, remove */
                        dout("fill_dirfrag removed %llx.%llx frag %x"
                             " (no ref)\n", ceph_vinop(inode), id);
                        rb_erase(&frag->node, &ci->i_fragtree);
                        kfree(frag);
                } else {
                        /* tree branch, keep and clear */
                        dout("fill_dirfrag cleared %llx.%llx frag %x"
                             " referral\n", ceph_vinop(inode), id);
                        frag->mds = -1;
                        frag->ndist = 0;
                }
                goto out;
        }

        /* find/add this frag to store mds delegation info */
        frag = __get_or_create_frag(ci, id);
        if (IS_ERR(frag)) {
                /* this is not the end of the world; we can continue
                   with bad/inaccurate delegation info */
                pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
                       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
                err = -ENOMEM;
                goto out;
        }
        frag->mds = mds;
        frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
        for (i = 0; i < frag->ndist; i++)
                frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
        dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
             ceph_vinop(inode), frag->frag, frag->ndist);

out:
        mutex_unlock(&ci->i_fragtree_mutex);
        return err;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
        struct ceph_inode_info *ci;
        int i;

        ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
        if (!ci)
                return NULL;

        dout("alloc_inode %p\n", &ci->vfs_inode);

        ci->i_version = 0;
        ci->i_time_warp_seq = 0;
        ci->i_ceph_flags = 0;
        ci->i_release_count = 0;
        ci->i_symlink = NULL;

        memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

        ci->i_fragtree = RB_ROOT;
        mutex_init(&ci->i_fragtree_mutex);

        ci->i_xattrs.blob = NULL;
        ci->i_xattrs.prealloc_blob = NULL;
        ci->i_xattrs.dirty = false;
        ci->i_xattrs.index = RB_ROOT;
        ci->i_xattrs.count = 0;
        ci->i_xattrs.names_size = 0;
        ci->i_xattrs.vals_size = 0;
        ci->i_xattrs.version = 0;
        ci->i_xattrs.index_version = 0;

        ci->i_caps = RB_ROOT;
        ci->i_auth_cap = NULL;
        ci->i_dirty_caps = 0;
        ci->i_flushing_caps = 0;
        INIT_LIST_HEAD(&ci->i_dirty_item);
        INIT_LIST_HEAD(&ci->i_flushing_item);
        ci->i_cap_flush_seq = 0;
        ci->i_cap_flush_last_tid = 0;
        memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
        init_waitqueue_head(&ci->i_cap_wq);
        ci->i_hold_caps_min = 0;
        ci->i_hold_caps_max = 0;
        INIT_LIST_HEAD(&ci->i_cap_delay_list);
        ci->i_cap_exporting_mds = 0;
        ci->i_cap_exporting_mseq = 0;
        ci->i_cap_exporting_issued = 0;

        INIT_LIST_HEAD(&ci->i_cap_snaps);
        ci->i_head_snapc = NULL;
        ci->i_snap_caps = 0;

        for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
                ci->i_nr_by_mode[i] = 0;

        ci->i_truncate_seq = 0;
        ci->i_truncate_size = 0;
        ci->i_truncate_pending = 0;

        ci->i_max_size = 0;
        ci->i_reported_size = 0;
        ci->i_wanted_max_size = 0;
        ci->i_requested_max_size = 0;

        ci->i_pin_ref = 0;
        ci->i_rd_ref = 0;
        ci->i_rdcache_ref = 0;
        ci->i_wr_ref = 0;
        ci->i_wrbuffer_ref = 0;
        ci->i_wrbuffer_ref_head = 0;
        ci->i_shared_gen = 0;
        ci->i_rdcache_gen = 0;
        ci->i_rdcache_revoking = 0;

        INIT_LIST_HEAD(&ci->i_unsafe_writes);
        INIT_LIST_HEAD(&ci->i_unsafe_dirops);
        spin_lock_init(&ci->i_unsafe_lock);

        ci->i_snap_realm = NULL;
        INIT_LIST_HEAD(&ci->i_snap_realm_item);
        INIT_LIST_HEAD(&ci->i_snap_flush_item);

        INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
        INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
        INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

        return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ceph_inode_info *ci = ceph_inode(inode);

        INIT_LIST_HEAD(&inode->i_dentry);
        kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag;
        struct rb_node *n;

        dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

        ceph_queue_caps_release(inode);

        /*
         * we may still have a snap_realm reference if there are stray
         * caps in i_cap_exporting_issued or i_snap_caps.
         */
        if (ci->i_snap_realm) {
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
                struct ceph_snap_realm *realm = ci->i_snap_realm;

                dout(" dropping residual ref to snap realm %p\n", realm);
                spin_lock(&realm->inodes_with_caps_lock);
                list_del_init(&ci->i_snap_realm_item);
                spin_unlock(&realm->inodes_with_caps_lock);
                ceph_put_snap_realm(mdsc, realm);
        }

        kfree(ci->i_symlink);
        while ((n = rb_first(&ci->i_fragtree)) != NULL) {
                frag = rb_entry(n, struct ceph_inode_frag, node);
                rb_erase(n, &ci->i_fragtree);
                kfree(frag);
        }

        __ceph_destroy_xattrs(ci);
        if (ci->i_xattrs.blob)
                ceph_buffer_put(ci->i_xattrs.blob);
        if (ci->i_xattrs.prealloc_blob)
                ceph_buffer_put(ci->i_xattrs.prealloc_blob);

        call_rcu(&inode->i_rcu, ceph_i_callback);
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
                        u32 truncate_seq, u64 truncate_size, u64 size)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int queue_trunc = 0;

        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
            (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
                dout("size %lld -> %llu\n", inode->i_size, size);
                inode->i_size = size;
                inode->i_blocks = (size + (1<<9) - 1) >> 9;
                ci->i_reported_size = size;
                if (truncate_seq != ci->i_truncate_seq) {
                        dout("truncate_seq %u -> %u\n",
                             ci->i_truncate_seq, truncate_seq);
                        ci->i_truncate_seq = truncate_seq;
                        /*
                         * If we hold relevant caps, or in the case where we're
                         * not the only client referencing this file and we
                         * don't hold those caps, then we need to check whether
                         * the file is either opened or mmapped
                         */
                        if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
                                       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
                                       CEPH_CAP_FILE_EXCL|
                                       CEPH_CAP_FILE_LAZYIO)) ||
                            mapping_mapped(inode->i_mapping) ||
                            __ceph_caps_file_wanted(ci)) {
                                ci->i_truncate_pending++;
                                queue_trunc = 1;
                        }
                }
        }
        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
            ci->i_truncate_size != truncate_size) {
                dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
                     truncate_size);
                ci->i_truncate_size = truncate_size;
        }
        return queue_trunc;
}
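
/*
 * Worked example of the sequence logic above (illustrative numbers):
 * if our i_truncate_seq is 4 and the MDS reports truncate_seq 5, the
 * MDS saw a truncate we missed, so we adopt its size/truncate_size
 * and, if the file may be open or mmapped, queue a vmtruncate.  If it
 * reports seq 4 with a larger size, the file simply grew and we take
 * the larger size.  If it reports seq 3 its info is stale, and both
 * size and truncate_size are ignored.
 */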

void ceph_fill_file_time(struct inode *inode, int issued,
                         u64 time_warp_seq, struct timespec *ctime,
                         struct timespec *mtime, struct timespec *atime)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int warn = 0;

        if (issued & (CEPH_CAP_FILE_EXCL|
                      CEPH_CAP_FILE_WR|
                      CEPH_CAP_FILE_BUFFER|
                      CEPH_CAP_AUTH_EXCL|
                      CEPH_CAP_XATTR_EXCL)) {
                if (timespec_compare(ctime, &inode->i_ctime) > 0) {
                        dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
                             inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
                             ctime->tv_sec, ctime->tv_nsec);
                        inode->i_ctime = *ctime;
                }
                if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
                        /* the MDS did a utimes() */
                        dout("mtime %ld.%09ld -> %ld.%09ld "
                             "tw %d -> %d\n",
                             inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
                             mtime->tv_sec, mtime->tv_nsec,
                             ci->i_time_warp_seq, (int)time_warp_seq);

                        inode->i_mtime = *mtime;
                        inode->i_atime = *atime;
                        ci->i_time_warp_seq = time_warp_seq;
                } else if (time_warp_seq == ci->i_time_warp_seq) {
                        /* nobody did utimes(); take the max */
                        if (timespec_compare(mtime, &inode->i_mtime) > 0) {
                                dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
                                     inode->i_mtime.tv_sec,
                                     inode->i_mtime.tv_nsec,
                                     mtime->tv_sec, mtime->tv_nsec);
                                inode->i_mtime = *mtime;
                        }
                        if (timespec_compare(atime, &inode->i_atime) > 0) {
                                dout("atime %ld.%09ld -> %ld.%09ld inc\n",
                                     inode->i_atime.tv_sec,
                                     inode->i_atime.tv_nsec,
                                     atime->tv_sec, atime->tv_nsec);
                                inode->i_atime = *atime;
                        }
                } else if (issued & CEPH_CAP_FILE_EXCL) {
                        /* we did a utimes(); ignore mds values */
                } else {
                        warn = 1;
                }
        } else {
                /* we have no write|excl caps; whatever the MDS says is true */
                if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
                        inode->i_ctime = *ctime;
                        inode->i_mtime = *mtime;
                        inode->i_atime = *atime;
                        ci->i_time_warp_seq = time_warp_seq;
                } else {
                        warn = 1;
                }
        }
        if (warn) /* time_warp_seq shouldn't go backwards */
                dout("%p mds time_warp_seq %llu < %u\n",
                     inode, time_warp_seq, ci->i_time_warp_seq);
}
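
/*
 * Worked example (illustrative numbers): with i_time_warp_seq == 7,
 * an MDS reply carrying time_warp_seq 8 means someone did a utimes(),
 * so mtime/atime are taken verbatim even if they move backwards.  A
 * reply with seq 7 may only advance the local times (take the max),
 * and a reply with seq 6 is stale and is ignored (with a warning,
 * since time_warp_seq should never go backwards).
 */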

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
                      struct ceph_mds_reply_info_in *iinfo,
                      struct ceph_mds_reply_dirfrag *dirinfo,
                      struct ceph_mds_session *session,
                      unsigned long ttl_from, int cap_fmode,
                      struct ceph_cap_reservation *caps_reservation)
{
        struct ceph_mds_reply_inode *info = iinfo->in;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int i;
        int issued, implemented;
        struct timespec mtime, atime, ctime;
        u32 nsplits;
        struct ceph_buffer *xattr_blob = NULL;
        int err = 0;
        int queue_trunc = 0;

        dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
             inode, ceph_vinop(inode), le64_to_cpu(info->version),
             ci->i_version);

        /*
         * prealloc xattr data, if it looks like we'll need it.  only
         * if len > 4 (meaning there are actually xattrs; the first 4
         * bytes are the xattr count).
         */
        if (iinfo->xattr_len > 4) {
                xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
                if (!xattr_blob)
                        pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
                               iinfo->xattr_len);
        }

        spin_lock(&inode->i_lock);

        /*
         * provided version will be odd if inode value is projected,
         * even if stable.  skip the update if we have newer stable
         * info (ours>=theirs, e.g. due to racing mds replies), unless
         * we are getting projected (unstable) info (in which case the
         * version is odd, and we want ours>theirs).
         *   us   them
         *   2    2     skip
         *   3    2     skip
         *   3    3     update
         */
        if (le64_to_cpu(info->version) > 0 &&
            (ci->i_version & ~1) >= le64_to_cpu(info->version))
                goto no_change;

        issued = __ceph_caps_issued(ci, &implemented);
        issued |= implemented | __ceph_caps_dirty(ci);

        /* update inode */
        ci->i_version = le64_to_cpu(info->version);
        inode->i_version++;
        inode->i_rdev = le32_to_cpu(info->rdev);

        if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
                inode->i_mode = le32_to_cpu(info->mode);
                inode->i_uid = le32_to_cpu(info->uid);
                inode->i_gid = le32_to_cpu(info->gid);
                dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
                     inode->i_uid, inode->i_gid);
        }

        if ((issued & CEPH_CAP_LINK_EXCL) == 0)
                inode->i_nlink = le32_to_cpu(info->nlink);

        /* be careful with mtime, atime, size */
        ceph_decode_timespec(&atime, &info->atime);
        ceph_decode_timespec(&mtime, &info->mtime);
        ceph_decode_timespec(&ctime, &info->ctime);
        queue_trunc = ceph_fill_file_size(inode, issued,
                                          le32_to_cpu(info->truncate_seq),
                                          le64_to_cpu(info->truncate_size),
                                          le64_to_cpu(info->size));
        ceph_fill_file_time(inode, issued,
                            le32_to_cpu(info->time_warp_seq),
                            &ctime, &mtime, &atime);

        /* only update max_size on auth cap */
        if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
            ci->i_max_size != le64_to_cpu(info->max_size)) {
                dout("max_size %lld -> %llu\n", ci->i_max_size,
                     le64_to_cpu(info->max_size));
                ci->i_max_size = le64_to_cpu(info->max_size);
        }

        ci->i_layout = info->layout;
        inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

        /* xattrs */
        /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
        if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
            le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
                if (ci->i_xattrs.blob)
                        ceph_buffer_put(ci->i_xattrs.blob);
                ci->i_xattrs.blob = xattr_blob;
                if (xattr_blob)
                        memcpy(ci->i_xattrs.blob->vec.iov_base,
                               iinfo->xattr_data, iinfo->xattr_len);
                ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
                xattr_blob = NULL;
        }

        inode->i_mapping->a_ops = &ceph_aops;
        inode->i_mapping->backing_dev_info =
                &ceph_sb_to_client(inode->i_sb)->backing_dev_info;

        switch (inode->i_mode & S_IFMT) {
        case S_IFIFO:
        case S_IFBLK:
        case S_IFCHR:
        case S_IFSOCK:
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
                inode->i_op = &ceph_file_iops;
                break;
        case S_IFREG:
                inode->i_op = &ceph_file_iops;
                inode->i_fop = &ceph_file_fops;
                break;
        case S_IFLNK:
                inode->i_op = &ceph_symlink_iops;
                if (!ci->i_symlink) {
                        int symlen = iinfo->symlink_len;
                        char *sym;

                        BUG_ON(symlen != inode->i_size);
                        spin_unlock(&inode->i_lock);

                        err = -ENOMEM;
                        sym = kmalloc(symlen+1, GFP_NOFS);
                        if (!sym)
                                goto out;
                        memcpy(sym, iinfo->symlink, symlen);
                        sym[symlen] = 0;

                        spin_lock(&inode->i_lock);
                        if (!ci->i_symlink)
                                ci->i_symlink = sym;
                        else
                                kfree(sym); /* lost a race */
                }
                break;
        case S_IFDIR:
                inode->i_op = &ceph_dir_iops;
                inode->i_fop = &ceph_dir_fops;

                ci->i_dir_layout = iinfo->dir_layout;

                ci->i_files = le64_to_cpu(info->files);
                ci->i_subdirs = le64_to_cpu(info->subdirs);
                ci->i_rbytes = le64_to_cpu(info->rbytes);
                ci->i_rfiles = le64_to_cpu(info->rfiles);
                ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
                ceph_decode_timespec(&ci->i_rctime, &info->rctime);

                /* set dir completion flag? */
                if (ci->i_files == 0 && ci->i_subdirs == 0 &&
                    ceph_snap(inode) == CEPH_NOSNAP &&
                    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
                    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
                    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
                        dout(" marking %p complete (empty)\n", inode);
                        ci->i_ceph_flags |= CEPH_I_COMPLETE;
                        ci->i_max_offset = 2;
                }

                /* it may be better to set st_size in getattr instead? */
                if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
                        inode->i_size = ci->i_rbytes;
                break;
        default:
                pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
                       ceph_vinop(inode), inode->i_mode);
        }

no_change:
        spin_unlock(&inode->i_lock);

        /* queue truncate if we saw i_size decrease */
        if (queue_trunc)
                ceph_queue_vmtruncate(inode);

        /* populate frag tree */
        /* FIXME: move me up, if/when version reflects fragtree changes */
        nsplits = le32_to_cpu(info->fragtree.nsplits);
        mutex_lock(&ci->i_fragtree_mutex);
        for (i = 0; i < nsplits; i++) {
                u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
                struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

                if (IS_ERR(frag))
                        continue;
                frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
                dout(" frag %x split by %d\n", frag->frag, frag->split_by);
        }
        mutex_unlock(&ci->i_fragtree_mutex);

        /* were we issued a capability? */
        if (info->cap.caps) {
                if (ceph_snap(inode) == CEPH_NOSNAP) {
                        ceph_add_cap(inode, session,
                                     le64_to_cpu(info->cap.cap_id),
                                     cap_fmode,
                                     le32_to_cpu(info->cap.caps),
                                     le32_to_cpu(info->cap.wanted),
                                     le32_to_cpu(info->cap.seq),
                                     le32_to_cpu(info->cap.mseq),
                                     le64_to_cpu(info->cap.realm),
                                     info->cap.flags,
                                     caps_reservation);
                } else {
                        spin_lock(&inode->i_lock);
                        dout(" %p got snap_caps %s\n", inode,
                             ceph_cap_string(le32_to_cpu(info->cap.caps)));
                        ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
                        if (cap_fmode >= 0)
                                __ceph_get_fmode(ci, cap_fmode);
                        spin_unlock(&inode->i_lock);
                }
        } else if (cap_fmode >= 0) {
                pr_warning("mds issued no caps on %llx.%llx\n",
                           ceph_vinop(inode));
                __ceph_get_fmode(ci, cap_fmode);
        }

        /* update delegation info? */
        if (dirinfo)
                ceph_fill_dirfrag(inode, dirinfo);

        err = 0;

out:
        if (xattr_blob)
                ceph_buffer_put(xattr_blob);
        return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
                                struct ceph_mds_reply_lease *lease,
                                struct ceph_mds_session *session,
                                unsigned long from_time)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        long unsigned duration = le32_to_cpu(lease->duration_ms);
        long unsigned ttl = from_time + (duration * HZ) / 1000;
        long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
        struct inode *dir;

        /* only track leases on regular dentries */
        if (dentry->d_op != &ceph_dentry_ops)
                return;

        spin_lock(&dentry->d_lock);
        dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
             dentry, le16_to_cpu(lease->mask), duration, ttl);

        /* make lease_rdcache_gen match directory */
        dir = dentry->d_parent->d_inode;
        di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

        if (lease->mask == 0)
                goto out_unlock;

        if (di->lease_gen == session->s_cap_gen &&
            time_before(ttl, dentry->d_time))
                goto out_unlock;  /* we already have a newer lease. */

        if (di->lease_session && di->lease_session != session)
                goto out_unlock;

        ceph_dentry_lru_touch(dentry);

        if (!di->lease_session)
                di->lease_session = ceph_get_mds_session(session);
        di->lease_gen = session->s_cap_gen;
        di->lease_seq = le32_to_cpu(lease->seq);
        di->lease_renew_after = half_ttl;
        di->lease_renew_from = 0;
        dentry->d_time = ttl;
out_unlock:
        spin_unlock(&dentry->d_lock);
        return;
}
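
/*
 * Example of the ttl arithmetic above (illustrative numbers): with
 * HZ == 250 and an MDS lease of duration_ms == 30000, the lease is
 * valid until from_time + (30000 * 250) / 1000 == from_time + 7500
 * jiffies, and lease_renew_after lands at the halfway point,
 * from_time + 3750 jiffies.
 */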

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
        struct dentry *dir = dn->d_parent;
        struct inode *inode = dn->d_parent->d_inode;
        struct ceph_dentry_info *di;

        BUG_ON(!inode);

        di = ceph_dentry(dn);

        spin_lock(&inode->i_lock);
        if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
                spin_unlock(&inode->i_lock);
                return;
        }
        di->offset = ceph_inode(inode)->i_max_offset++;
        spin_unlock(&inode->i_lock);

        spin_lock(&dir->d_lock);
        spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
        list_move(&dn->d_u.d_child, &dir->d_subdirs);
        dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
             dn->d_u.d_child.prev, dn->d_u.d_child.next);
        spin_unlock(&dn->d_lock);
        spin_unlock(&dir->d_lock);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
                                    bool *prehash, bool set_offset)
{
        struct dentry *realdn;

        BUG_ON(dn->d_inode);

        /* dn must be unhashed */
        if (!d_unhashed(dn))
                d_drop(dn);
        realdn = d_materialise_unique(dn, in);
        if (IS_ERR(realdn)) {
                pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
                       PTR_ERR(realdn), dn, in, ceph_vinop(in));
                if (prehash)
                        *prehash = false; /* don't rehash on error */
                dn = realdn; /* note realdn contains the error */
                goto out;
        } else if (realdn) {
                dout("dn %p (%d) spliced with %p (%d) "
                     "inode %p ino %llx.%llx\n",
                     dn, dn->d_count,
                     realdn, realdn->d_count,
                     realdn->d_inode, ceph_vinop(realdn->d_inode));
                dput(dn);
                dn = realdn;
        } else {
                BUG_ON(!ceph_dentry(dn));
                dout("dn %p attached to %p ino %llx.%llx\n",
                     dn, dn->d_inode, ceph_vinop(dn->d_inode));
        }
        if ((!prehash || *prehash) && d_unhashed(dn))
                d_rehash(dn);
        if (set_offset)
                ceph_set_dentry_offset(dn);
out:
        return dn;
}
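
/*
 * Typical caller pattern (taken from ceph_fill_trace() below): splice
 * the new inode into the dentry, propagate any error, and remember
 * that the returned dentry may differ from the one passed in.
 */
#if 0
        dn = splice_dentry(dn, in, &have_lease, true);
        if (IS_ERR(dn)) {
                err = PTR_ERR(dn);
                goto done;
        }
        req->r_dentry = dn;  /* may have spliced */
#endif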

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                    struct ceph_mds_session *session)
{
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct inode *in = NULL;
        struct ceph_mds_reply_inode *ininfo;
        struct ceph_vino vino;
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        int i = 0;
        int err = 0;

        dout("fill_trace %p is_dentry %d is_target %d\n", req,
             rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
        /*
         * Debugging hook:
         *
         * If we resend completed ops to a recovering mds, we get no
         * trace.  Since that is very rare, pretend this is the case
         * to ensure the 'no trace' handlers in the callers behave.
         *
         * Fill in inodes unconditionally to avoid breaking cap
         * invariants.
         */
        if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
                pr_info("fill_trace faking empty trace on %lld %s\n",
                        req->r_tid, ceph_mds_op_name(rinfo->head->op));
                if (rinfo->head->is_dentry) {
                        rinfo->head->is_dentry = 0;
                        err = fill_inode(req->r_locked_dir,
                                         &rinfo->diri, rinfo->dirfrag,
                                         session, req->r_request_started, -1);
                }
                if (rinfo->head->is_target) {
                        rinfo->head->is_target = 0;
                        ininfo = rinfo->targeti.in;
                        vino.ino = le64_to_cpu(ininfo->ino);
                        vino.snap = le64_to_cpu(ininfo->snapid);
                        in = ceph_get_inode(sb, vino);
                        err = fill_inode(in, &rinfo->targeti, NULL,
                                         session, req->r_request_started,
                                         req->r_fmode);
                        iput(in);
                }
        }
#endif

        if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
                dout("fill_trace reply is empty!\n");
                if (rinfo->head->result == 0 && req->r_locked_dir)
                        ceph_invalidate_dir_request(req);
                return 0;
        }

        if (rinfo->head->is_dentry) {
                struct inode *dir = req->r_locked_dir;

                err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
                                 session, req->r_request_started, -1,
                                 &req->r_caps_reservation);
                if (err < 0)
                        return err;
        }

        /*
         * ignore null lease/binding on snapdir ENOENT, or else we
         * will have trouble splicing in the virtual snapdir later
         */
        if (rinfo->head->is_dentry && !req->r_aborted &&
            (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
                                               fsc->mount_options->snapdir_name,
                                               req->r_dentry->d_name.len))) {
                /*
                 * lookup link rename   : null -> possibly existing inode
                 * mknod symlink mkdir  : null -> new inode
                 * unlink               : linked -> null
                 */
                struct inode *dir = req->r_locked_dir;
                struct dentry *dn = req->r_dentry;
                bool have_dir_cap, have_lease;

                BUG_ON(!dn);
                BUG_ON(!dir);
                BUG_ON(dn->d_parent->d_inode != dir);
                BUG_ON(ceph_ino(dir) !=
                       le64_to_cpu(rinfo->diri.in->ino));
                BUG_ON(ceph_snap(dir) !=
                       le64_to_cpu(rinfo->diri.in->snapid));

                /* do we have a lease on the whole dir? */
                have_dir_cap =
                        (le32_to_cpu(rinfo->diri.in->cap.caps) &
                         CEPH_CAP_FILE_SHARED);

                /* do we have a dn lease? */
                have_lease = have_dir_cap ||
                        (le16_to_cpu(rinfo->dlease->mask) &
                         CEPH_LOCK_DN);

                if (!have_lease)
                        dout("fill_trace  no dentry lease or dir cap\n");

                /* rename? */
                if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
                             req->r_old_dentry->d_name.len,
                             req->r_old_dentry->d_name.name,
                             dn, dn->d_name.len, dn->d_name.name);
                        dout("fill_trace doing d_move %p -> %p\n",
                             req->r_old_dentry, dn);

                        /* d_move screws up d_subdirs order */
                        ceph_i_clear(dir, CEPH_I_COMPLETE);

                        d_move(req->r_old_dentry, dn);
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
                             req->r_old_dentry->d_name.len,
                             req->r_old_dentry->d_name.name,
                             dn, dn->d_name.len, dn->d_name.name);

                        /* ensure target dentry is invalidated, despite
                           rehashing bug in vfs_rename_dir */
                        ceph_invalidate_dentry_lease(dn);

                        /* take overwritten dentry's readdir offset */
                        dout("dn %p gets %p offset %lld (old offset %lld)\n",
                             req->r_old_dentry, dn, ceph_dentry(dn)->offset,
                             ceph_dentry(req->r_old_dentry)->offset);
                        ceph_dentry(req->r_old_dentry)->offset =
                                ceph_dentry(dn)->offset;

                        dn = req->r_old_dentry;  /* use old_dentry */
                        in = dn->d_inode;
                }

                /* null dentry? */
                if (!rinfo->head->is_target) {
                        dout("fill_trace null dentry\n");
                        if (dn->d_inode) {
                                dout("d_delete %p\n", dn);
                                d_delete(dn);
                        } else {
                                dout("d_instantiate %p NULL\n", dn);
                                d_instantiate(dn, NULL);
                                if (have_lease && d_unhashed(dn))
                                        d_rehash(dn);
                                update_dentry_lease(dn, rinfo->dlease,
                                                    session,
                                                    req->r_request_started);
                        }
                        goto done;
                }

                /* attach proper inode */
                ininfo = rinfo->targeti.in;
                vino.ino = le64_to_cpu(ininfo->ino);
                vino.snap = le64_to_cpu(ininfo->snapid);
                in = dn->d_inode;
                if (!in) {
                        in = ceph_get_inode(sb, vino);
                        if (IS_ERR(in)) {
                                pr_err("fill_trace bad get_inode "
                                       "%llx.%llx\n", vino.ino, vino.snap);
                                err = PTR_ERR(in);
                                d_delete(dn);
                                goto done;
                        }
                        dn = splice_dentry(dn, in, &have_lease, true);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                goto done;
                        }
                        req->r_dentry = dn;  /* may have spliced */
                        igrab(in);
                } else if (ceph_ino(in) == vino.ino &&
                           ceph_snap(in) == vino.snap) {
                        igrab(in);
                } else {
                        dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
                             dn, in, ceph_ino(in), ceph_snap(in),
                             vino.ino, vino.snap);
                        have_lease = false;
                        in = NULL;
                }

                if (have_lease)
                        update_dentry_lease(dn, rinfo->dlease, session,
                                            req->r_request_started);
                dout(" final dn %p\n", dn);
                i++;
        } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
                   req->r_op == CEPH_MDS_OP_MKSNAP) {
                struct dentry *dn = req->r_dentry;

                /* fill out a snapdir LOOKUPSNAP dentry */
                BUG_ON(!dn);
                BUG_ON(!req->r_locked_dir);
                BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
                ininfo = rinfo->targeti.in;
                vino.ino = le64_to_cpu(ininfo->ino);
                vino.snap = le64_to_cpu(ininfo->snapid);
                in = ceph_get_inode(sb, vino);
                if (IS_ERR(in)) {
                        pr_err("fill_inode get_inode badness %llx.%llx\n",
                               vino.ino, vino.snap);
                        err = PTR_ERR(in);
                        d_delete(dn);
                        goto done;
                }
                dout(" linking snapped dir %p to dn %p\n", in, dn);
                dn = splice_dentry(dn, in, NULL, true);
                if (IS_ERR(dn)) {
                        err = PTR_ERR(dn);
                        goto done;
                }
                req->r_dentry = dn;  /* may have spliced */
                igrab(in);
                rinfo->head->is_dentry = 1;  /* fool notrace handlers */
        }

        if (rinfo->head->is_target) {
                vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
                vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

                if (in == NULL || ceph_ino(in) != vino.ino ||
                    ceph_snap(in) != vino.snap) {
                        in = ceph_get_inode(sb, vino);
                        if (IS_ERR(in)) {
                                err = PTR_ERR(in);
                                goto done;
                        }
                }
                req->r_target_inode = in;

                err = fill_inode(in,
                                 &rinfo->targeti, NULL,
                                 session, req->r_request_started,
                                 (le32_to_cpu(rinfo->head->result) == 0) ?
                                 req->r_fmode : -1,
                                 &req->r_caps_reservation);
                if (err < 0) {
                        pr_err("fill_inode badness %p %llx.%llx\n",
                               in, ceph_vinop(in));
                        goto done;
                }
        }

done:
        dout("fill_trace done err=%d\n", err);
        return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
                             struct ceph_mds_session *session)
{
        struct dentry *parent = req->r_dentry;
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct qstr dname;
        struct dentry *dn;
        struct inode *in;
        int err = 0, i;
        struct inode *snapdir = NULL;
        struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
        u64 frag = le32_to_cpu(rhead->args.readdir.frag);
        struct ceph_dentry_info *di;

        if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
                snapdir = ceph_get_snapdir(parent->d_inode);
                parent = d_find_alias(snapdir);
                dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
                     rinfo->dir_nr, parent);
        } else {
                dout("readdir_prepopulate %d items under dn %p\n",
                     rinfo->dir_nr, parent);
                if (rinfo->dir_dir)
                        ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
        }

        for (i = 0; i < rinfo->dir_nr; i++) {
                struct ceph_vino vino;

                dname.name = rinfo->dir_dname[i];
                dname.len = rinfo->dir_dname_len[i];
                dname.hash = full_name_hash(dname.name, dname.len);

                vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
                vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
                dn = d_lookup(parent, &dname);
                dout("d_lookup on parent=%p name=%.*s got %p\n",
                     parent, dname.len, dname.name, dn);

                if (!dn) {
                        dn = d_alloc(parent, &dname);
                        dout("d_alloc %p '%.*s' = %p\n", parent,
                             dname.len, dname.name, dn);
                        if (dn == NULL) {
                                dout("d_alloc badness\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        err = ceph_init_dentry(dn);
                        if (err < 0) {
                                dput(dn);
                                goto out;
                        }
                } else if (dn->d_inode &&
                           (ceph_ino(dn->d_inode) != vino.ino ||
                            ceph_snap(dn->d_inode) != vino.snap)) {
                        dout(" dn %p points to wrong inode %p\n",
                             dn, dn->d_inode);
                        d_delete(dn);
                        dput(dn);
                        goto retry_lookup;
                } else {
                        /* reorder parent's d_subdirs */
                        spin_lock(&parent->d_lock);
                        spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
                        list_move(&dn->d_u.d_child, &parent->d_subdirs);
                        spin_unlock(&dn->d_lock);
                        spin_unlock(&parent->d_lock);
                }

                di = dn->d_fsdata;
                di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

                /* inode */
                if (dn->d_inode) {
                        in = dn->d_inode;
                } else {
                        in = ceph_get_inode(parent->d_sb, vino);
                        if (IS_ERR(in)) {
                                dout("new_inode badness\n");
                                d_delete(dn);
                                dput(dn);
                                err = PTR_ERR(in);
                                goto out;
                        }
                        dn = splice_dentry(dn, in, NULL, false);
                        if (IS_ERR(dn))
                                dn = NULL;
                }

                if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
                               req->r_request_started, -1,
                               &req->r_caps_reservation) < 0) {
                        pr_err("fill_inode badness on %p\n", in);
                        goto next_item;
                }
                if (dn)
                        update_dentry_lease(dn, rinfo->dir_dlease[i],
                                            req->r_session,
                                            req->r_request_started);
next_item:
                if (dn)
                        dput(dn);
        }
        req->r_did_prepopulate = true;

out:
        if (snapdir) {
                iput(snapdir);
                dput(parent);
        }
        dout("readdir_prepopulate done\n");
        return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret = 0;

        spin_lock(&inode->i_lock);
        dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
        inode->i_size = size;
        inode->i_blocks = (size + (1 << 9) - 1) >> 9;

        /* tell the MDS if we are approaching max_size */
        if ((size << 1) >= ci->i_max_size &&
            (ci->i_reported_size << 1) < ci->i_max_size)
                ret = 1;

        spin_unlock(&inode->i_lock);
        return ret;
}
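
/*
 * Example of the max_size check above (illustrative numbers): with
 * i_max_size == 4 MB, the first size update that reaches 2 MB
 * ((size << 1) >= i_max_size) while the last reported size was still
 * below 2 MB returns 1, telling the caller to ask the MDS for a
 * larger max_size before writers actually hit the current limit.
 */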

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
        if (queue_work(ceph_inode_to_client(inode)->wb_wq,
                       &ceph_inode(inode)->i_wb_work)) {
                dout("ceph_queue_writeback %p\n", inode);
                igrab(inode);
        } else {
                dout("ceph_queue_writeback %p failed\n", inode);
        }
}

static void ceph_writeback_work(struct work_struct *work)
{
        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
                                                  i_wb_work);
        struct inode *inode = &ci->vfs_inode;

        dout("writeback %p\n", inode);
        filemap_fdatawrite(&inode->i_data);
        iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
        if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
                       &ceph_inode(inode)->i_pg_inv_work)) {
                dout("ceph_queue_invalidate %p\n", inode);
                igrab(inode);
        } else {
                dout("ceph_queue_invalidate %p failed\n", inode);
        }
}

/*
 * invalidate any pages that are not dirty or under writeback.  this
 * includes pages that are clean and mapped.
 */
static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t next = 0;
        int i;

        pagevec_init(&pvec, 0);
        while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t index;
                        int skip_page =
                                (PageDirty(page) || PageWriteback(page));

                        if (!skip_page)
                                skip_page = !trylock_page(page);

                        /*
                         * We really shouldn't be looking at the ->index of an
                         * unlocked page.  But we're not allowed to lock these
                         * pages.  So we rely upon nobody altering the ->index
                         * of this (pinned-by-us) page.
                         */
                        index = page->index;
                        if (index > next)
                                next = index;
                        next++;

                        if (skip_page)
                                continue;

                        generic_error_remove_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
                                                  i_pg_inv_work);
        struct inode *inode = &ci->vfs_inode;
        u32 orig_gen;
        int check = 0;

        spin_lock(&inode->i_lock);
        dout("invalidate_pages %p gen %d revoking %d\n", inode,
             ci->i_rdcache_gen, ci->i_rdcache_revoking);
        if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
                /* nevermind! */
                spin_unlock(&inode->i_lock);
                goto out;
        }
        orig_gen = ci->i_rdcache_gen;
        spin_unlock(&inode->i_lock);

        ceph_invalidate_nondirty_pages(inode->i_mapping);

        spin_lock(&inode->i_lock);
        if (orig_gen == ci->i_rdcache_gen &&
            orig_gen == ci->i_rdcache_revoking) {
                dout("invalidate_pages %p gen %d successful\n", inode,
                     ci->i_rdcache_gen);
                ci->i_rdcache_revoking--;
                check = 1;
        } else {
                dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
                     inode, orig_gen, ci->i_rdcache_gen,
                     ci->i_rdcache_revoking);
        }
        spin_unlock(&inode->i_lock);

        if (check)
                ceph_check_caps(ci, 0, NULL);
out:
        iput(inode);
}

/*
 * called by trunc_wq; take i_mutex ourselves
 *
 * We also truncate in a separate thread as well.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
                                                  i_vmtruncate_work);
        struct inode *inode = &ci->vfs_inode;

        dout("vmtruncate_work %p\n", inode);
        mutex_lock(&inode->i_mutex);
        __ceph_do_pending_vmtruncate(inode);
        mutex_unlock(&inode->i_mutex);
        iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
                       &ci->i_vmtruncate_work)) {
                dout("ceph_queue_vmtruncate %p\n", inode);
                igrab(inode);
        } else {
                dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
                     inode, ci->i_truncate_pending);
        }
}

/*
 * called with i_mutex held.
 *
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 to;
        int wrbuffer_refs, wake = 0;

retry:
        spin_lock(&inode->i_lock);
        if (ci->i_truncate_pending == 0) {
                dout("__do_pending_vmtruncate %p none pending\n", inode);
                spin_unlock(&inode->i_lock);
                return;
        }

        /*
         * make sure any dirty snapped pages are flushed before we
         * possibly truncate them.. so write AND block!
         */
        if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
                dout("__do_pending_vmtruncate %p flushing snaps first\n",
                     inode);
                spin_unlock(&inode->i_lock);
                filemap_write_and_wait_range(&inode->i_data, 0,
                                             inode->i_sb->s_maxbytes);
                goto retry;
        }

        to = ci->i_truncate_size;
        wrbuffer_refs = ci->i_wrbuffer_ref;
        dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
             ci->i_truncate_pending, to);
        spin_unlock(&inode->i_lock);

        truncate_inode_pages(inode->i_mapping, to);

        spin_lock(&inode->i_lock);
        ci->i_truncate_pending--;
        if (ci->i_truncate_pending == 0)
                wake = 1;
        spin_unlock(&inode->i_lock);

        if (wrbuffer_refs == 0)
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
        if (wake)
                wake_up_all(&ci->i_cap_wq);
}
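
/*
 * Note on the retry loop above: dirty pages belonging to an older
 * snap context (i_wrbuffer_ref > i_wrbuffer_ref_head) must reach the
 * OSDs before truncate_inode_pages() may discard them, so we drop
 * i_lock, write and wait, and recheck from the top rather than
 * truncating snapped data that was never flushed.
 */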

/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);

        nd_set_link(nd, ci->i_symlink);
        return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
        .readlink = generic_readlink,
        .follow_link = ceph_sym_follow_link,
};
  1369. /*
  1370. * setattr
  1371. */
  1372. int ceph_setattr(struct dentry *dentry, struct iattr *attr)
  1373. {
  1374. struct inode *inode = dentry->d_inode;
  1375. struct ceph_inode_info *ci = ceph_inode(inode);
  1376. struct inode *parent_inode = dentry->d_parent->d_inode;
  1377. const unsigned int ia_valid = attr->ia_valid;
  1378. struct ceph_mds_request *req;
  1379. struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
  1380. int issued;
  1381. int release = 0, dirtied = 0;
  1382. int mask = 0;
  1383. int err = 0;
  1384. if (ceph_snap(inode) != CEPH_NOSNAP)
  1385. return -EROFS;
  1386. __ceph_do_pending_vmtruncate(inode);
  1387. err = inode_change_ok(inode, attr);
  1388. if (err != 0)
  1389. return err;
  1390. req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
  1391. USE_AUTH_MDS);
  1392. if (IS_ERR(req))
  1393. return PTR_ERR(req);
  1394. spin_lock(&inode->i_lock);
  1395. issued = __ceph_caps_issued(ci, NULL);
  1396. dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     inode->i_uid, attr->ia_uid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_uid != inode->i_uid) {
			req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     inode->i_gid, attr->ia_gid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_gid != inode->i_gid) {
			req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
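	/*
	 * Timestamps: when we hold FILE_EXCL we also bump i_time_warp_seq,
	 * so that this explicitly-set time takes precedence over timestamp
	 * updates implied by racing writes.
	 */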
	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		__ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;
	spin_unlock(&inode->i_lock);
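
	/* anything we couldn't apply locally is sent to the MDS */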
	if (mask) {
		req->r_inode = igrab(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&inode->i_lock);
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int ceph_do_getattr(struct inode *inode, int mask)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = igrab(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}

/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask, unsigned int flags)
{
	int err;

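	/* we can't block during RCU path walk; returning -ECHILD asks the
	 * VFS to retry in ref-walk mode */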
	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
	if (!err)
		err = generic_permission(inode, mask, flags, NULL);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = inode->i_ino;
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
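		/* directories report their recursive byte count as the
		 * size, and a fixed 64KB "block size" */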
		if (S_ISDIR(inode->i_mode)) {
			stat->size = ci->i_rbytes;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}