#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/pagevec.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>
/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);
/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}
/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}
const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};
/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
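/*
 * Illustrative sketch (not part of the driver): a frag identifier
 * encodes a split depth plus a value prefix, so splitting the root
 * frag one way yields two children covering disjoint hash ranges:
 *
 *	u32 root  = ceph_frag_make(0, 0);
 *	u32 left  = ceph_frag_make_child(root, 1, 0);
 *	u32 right = ceph_frag_make_child(root, 1, 1);
 *
 * ceph_choose_frag() below walks such a tree from the root down to
 * the leaf whose range contains a given dentry hash value.
 */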
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;
	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}
/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);

		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}
/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);
	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}
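/*
 * Hedged usage sketch (identifiers here are illustrative, not from
 * this file): a caller mapping a dentry name hash to its owning frag,
 * and to any MDS delegation for it, would do roughly:
 *
 *	int found;
 *	struct ceph_inode_frag fi;
 *	u32 fg = ceph_choose_frag(ci, dname_hash, &fi, &found);
 *
 * If found is set, fi.mds and fi.dist[] say where to direct the
 * request.
 */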
/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}
/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_release_count = 0;
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;

	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}
static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}
/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
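/*
 * A worked example of the sequence comparison (assuming ceph_seq_cmp()
 * is the usual wraparound-safe (s32)(a - b) helper from ceph_fs.h):
 * with a = 0x00000001 and b = 0xffffffff, (s32)(a - b) == 2 > 0, so a
 * is treated as newer even though it is numerically smaller.
 */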
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
				       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
				       CEPH_CAP_FILE_EXCL|
				       CEPH_CAP_FILE_LAZYIO)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}
	return queue_trunc;
}
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}
/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued, implemented;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&inode->i_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) >= le64_to_cpu(info->version))
		goto no_change;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = le32_to_cpu(info->uid);
		inode->i_gid = le32_to_cpu(info->gid);
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     inode->i_uid, inode->i_gid);
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		inode->i_nlink = le32_to_cpu(info->nlink);

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	/* only update max_size on auth cap */
	if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	    ci->i_max_size != le64_to_cpu(info->max_size)) {
		dout("max_size %lld -> %llu\n", ci->i_max_size,
		     le64_to_cpu(info->max_size));
		ci->i_max_size = le64_to_cpu(info->max_size);
	}

	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			int symlen = iinfo->symlink_len;
			char *sym;

			BUG_ON(symlen != inode->i_size);
			spin_unlock(&inode->i_lock);

			err = -ENOMEM;
			sym = kmalloc(symlen+1, GFP_NOFS);
			if (!sym)
				goto out;
			memcpy(sym, iinfo->symlink, symlen);
			sym[symlen] = 0;

			spin_lock(&inode->i_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);

		/* set dir completion flag? */
		if (ci->i_files == 0 && ci->i_subdirs == 0 &&
		    ceph_snap(inode) == CEPH_NOSNAP &&
		    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
		    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
		    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
			dout(" marking %p complete (empty)\n", inode);
			/* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
			ci->i_max_offset = 2;
		}
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

no_change:
	spin_unlock(&inode->i_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

		if (IS_ERR(frag))
			continue;
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags,
				     caps_reservation);
		} else {
			spin_lock(&inode->i_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&inode->i_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}
/*
 * caller should hold session s_mutex.
 */
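/*
 * Lease timing, worked through (illustrative numbers): with
 * duration_ms = 30000 and HZ = 250, ttl lands 30000*250/1000 = 7500
 * jiffies (30 s) past from_time, and lease renewal is attempted from
 * the halfway point (half_ttl) onward.
 */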
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	unsigned long duration = le32_to_cpu(lease->duration_ms);
	unsigned long ttl = from_time + (duration * HZ) / 1000;
	unsigned long half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
	     dentry, le16_to_cpu(lease->mask), duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (lease->mask == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}
/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dn->d_parent->d_inode;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	di = ceph_dentry(dn);

	spin_lock(&inode->i_lock);
	if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
		spin_unlock(&inode->i_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&inode->i_lock);

	spin_lock(&dir->d_lock);
	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dir->d_lock);
}
/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
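/*
 * For reference (hedged summary of d_materialise_unique() semantics):
 * it returns an ERR_PTR on failure, a *different* dentry if an alias
 * for the inode already existed (we then drop ours and use that one),
 * or NULL if our dentry was attached in place.  The branches below
 * mirror those three outcomes.
 */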
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash, bool set_offset)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, dn->d_count,
		     realdn, realdn->d_count,
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	if (set_offset)
		ceph_set_dentry_offset(dn);
out:
	return dn;
}
/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain:
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int i = 0;
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
				 session, req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (err < 0)
			return err;
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			(le16_to_cpu(rinfo->dlease->mask) &
			 CEPH_LOCK_DN);

		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/*
			 * d_move() puts the renamed dentry at the end of
			 * d_subdirs.  We need to assign it an appropriate
			 * directory offset so we can behave when holding
			 * I_COMPLETE.
			 */
			ceph_set_dentry_offset(req->r_old_dentry);
			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
			in = dn->d_inode;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = dn->d_inode;
		if (!in) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				pr_err("fill_trace bad get_inode "
				       "%llx.%llx\n", vino.ino, vino.snap);
				err = PTR_ERR(in);
				d_delete(dn);
				goto done;
			}
			dn = splice_dentry(dn, in, &have_lease, true);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
			igrab(in);
		} else if (ceph_ino(in) == vino.ino &&
			   ceph_snap(in) == vino.snap) {
			igrab(in);
		} else {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, in, ceph_ino(in), ceph_snap(in),
			     vino.ino, vino.snap);
			have_lease = false;
			in = NULL;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
		i++;
	} else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		   req->r_op == CEPH_MDS_OP_MKSNAP) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			pr_err("fill_inode get_inode badness %llx.%llx\n",
			       vino.ino, vino.snap);
			err = PTR_ERR(in);
			d_delete(dn);
			goto done;
		}
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		dn = splice_dentry(dn, in, NULL, true);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
		igrab(in);
		rinfo->head->is_dentry = 1;  /* fool notrace handlers */
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		if (in == NULL || ceph_ino(in) != vino.ino ||
		    ceph_snap(in) != vino.snap) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				err = PTR_ERR(in);
				goto done;
			}
		}
		req->r_target_inode = in;

		err = fill_inode(in,
				 &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

done:
	dout("fill_trace done err=%d\n", err);
	return err;
}
/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
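/*
 * Offset encoding, for orientation (assuming the usual super.h
 * helper): ceph_make_fpos(frag, off) packs the frag into the high 32
 * bits of the file position and the within-frag index into the low 32
 * bits, e.g. frag 0x01000000 at index 3 yields 0x0100000000000003.
 */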
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u64 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_dentry_info *di;

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			err = ceph_init_dentry(dn);
			if (err < 0) {
				dput(dn);
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&parent->d_lock);
			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&parent->d_lock);
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_delete(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
			dn = splice_dentry(dn, in, NULL, false);
			if (IS_ERR(dn))
				dn = NULL;
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			goto next_item;
		}
		if (dn)
			update_dentry_lease(dn, rinfo->dir_dlease[i],
					    req->r_session,
					    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}
int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&inode->i_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
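	/*
	 * Worked example (illustrative): with i_max_size = 4 MB we
	 * return 1 once size reaches 2 MB (size << 1 >= max_size),
	 * but only if the last size we reported to the MDS was still
	 * below that halfway mark, so we don't re-report endlessly.
	 */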
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&inode->i_lock);
	return ret;
}
/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}
/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
	}
}
/*
 * invalidate any pages that are not dirty or under writeback.  this
 * includes pages that are clean and mapped.
 */
static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int skip_page =
				(PageDirty(page) || PageWriteback(page));

			if (!skip_page)
				skip_page = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;

			if (skip_page)
				continue;

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	spin_lock(&inode->i_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		spin_unlock(&inode->i_lock);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&inode->i_lock);

	ceph_invalidate_nondirty_pages(inode->i_mapping);

	spin_lock(&inode->i_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
	}
	spin_unlock(&inode->i_lock);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}
/*
 * called by trunc_wq; take i_mutex ourselves
 *
 * We truncate in a separate thread as well.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	mutex_unlock(&inode->i_mutex);
	iput(inode);
}
/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
	}
}
/*
 * called with i_mutex held.
 *
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, wake = 0;

retry:
	spin_lock(&inode->i_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&inode->i_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&inode->i_lock);

	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&inode->i_lock);
	ci->i_truncate_pending--;
	if (ci->i_truncate_pending == 0)
		wake = 1;
	spin_unlock(&inode->i_lock);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
	if (wake)
		wake_up_all(&ci->i_cap_wq);
}
/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
	nd_set_link(nd, ci->i_symlink);
	return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = ceph_sym_follow_link,
};
  1373. /*
  1374. * setattr
  1375. */
  1376. int ceph_setattr(struct dentry *dentry, struct iattr *attr)
  1377. {
  1378. struct inode *inode = dentry->d_inode;
  1379. struct ceph_inode_info *ci = ceph_inode(inode);
  1380. struct inode *parent_inode = dentry->d_parent->d_inode;
  1381. const unsigned int ia_valid = attr->ia_valid;
  1382. struct ceph_mds_request *req;
  1383. struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
  1384. int issued;
  1385. int release = 0, dirtied = 0;
  1386. int mask = 0;
  1387. int err = 0;
  1388. if (ceph_snap(inode) != CEPH_NOSNAP)
  1389. return -EROFS;
  1390. __ceph_do_pending_vmtruncate(inode);
  1391. err = inode_change_ok(inode, attr);
  1392. if (err != 0)
  1393. return err;
  1394. req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
  1395. USE_AUTH_MDS);
  1396. if (IS_ERR(req))
  1397. return PTR_ERR(req);
  1398. spin_lock(&inode->i_lock);
  1399. issued = __ceph_caps_issued(ci, NULL);
  1400. dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     inode->i_uid, attr->ia_uid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_uid != inode->i_uid) {
			req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     inode->i_gid, attr->ia_gid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_gid != inode->i_gid) {
			req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
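
	/*
	 * timestamps get a third case: with only FILE_WR we may still
	 * move a time forward locally; under FILE_EXCL we also bump
	 * i_time_warp_seq so the MDS can order our local time updates
	 * against updates from other clients
	 */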
	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/*
	 * ctime is normally updated as a side effect of the changes
	 * above; only a ctime-only setattr needs special handling
	 */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if the kernel wants to dirty ctime but nothing
			 * else, we need to choose a cap to dirty under, or
			 * do an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		__ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}
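
	/* only offer to release caps we actually hold */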
	release &= issued;
	spin_unlock(&inode->i_lock);
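
	/* anything we could not apply locally goes to the auth MDS */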
	if (mask) {
		req->r_inode = igrab(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&inode->i_lock);
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int ceph_do_getattr(struct inode *inode, int mask)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
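
	/* if we already hold caps covering this mask, our copy is valid */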
	if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = igrab(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}

/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask, unsigned int flags)
{
	int err;

	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;
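
	/* refresh i_mode/i_uid/i_gid if our AUTH_SHARED cap has lapsed */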
	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);

	if (!err)
		err = generic_permission(inode, mask, flags, NULL);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
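
		/*
		 * directories report a ceph-specific size: recursive
		 * bytes if the rbytes mount option is set, otherwise
		 * the number of entries
		 */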
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}