/*
 * fs/ceph/xattr.c - Ceph filesystem extended attribute handling.
 */
  1. #include <linux/ceph/ceph_debug.h>
  2. #include "super.h"
  3. #include "mds_client.h"
  4. #include <linux/ceph/decode.h>
  5. #include <linux/xattr.h>
  6. #include <linux/slab.h>
  7. static bool ceph_is_valid_xattr(const char *name)
  8. {
  9. return !strncmp(name, "ceph.", 5) ||
  10. !strncmp(name, XATTR_SECURITY_PREFIX,
  11. XATTR_SECURITY_PREFIX_LEN) ||
  12. !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
  13. !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
  14. }
/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr_cb {
	bool readonly;		/* true: value is generated, cannot be set */
	char *name;		/* full xattr name, e.g. "ceph.dir.entries" */
	/* formats the value into val (up to size bytes), returns length */
	size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
			      size_t size);
};
  25. /* directories */
  26. static size_t ceph_vxattrcb_entries(struct ceph_inode_info *ci, char *val,
  27. size_t size)
  28. {
  29. return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
  30. }
  31. static size_t ceph_vxattrcb_files(struct ceph_inode_info *ci, char *val,
  32. size_t size)
  33. {
  34. return snprintf(val, size, "%lld", ci->i_files);
  35. }
  36. static size_t ceph_vxattrcb_subdirs(struct ceph_inode_info *ci, char *val,
  37. size_t size)
  38. {
  39. return snprintf(val, size, "%lld", ci->i_subdirs);
  40. }
  41. static size_t ceph_vxattrcb_rentries(struct ceph_inode_info *ci, char *val,
  42. size_t size)
  43. {
  44. return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
  45. }
  46. static size_t ceph_vxattrcb_rfiles(struct ceph_inode_info *ci, char *val,
  47. size_t size)
  48. {
  49. return snprintf(val, size, "%lld", ci->i_rfiles);
  50. }
  51. static size_t ceph_vxattrcb_rsubdirs(struct ceph_inode_info *ci, char *val,
  52. size_t size)
  53. {
  54. return snprintf(val, size, "%lld", ci->i_rsubdirs);
  55. }
  56. static size_t ceph_vxattrcb_rbytes(struct ceph_inode_info *ci, char *val,
  57. size_t size)
  58. {
  59. return snprintf(val, size, "%lld", ci->i_rbytes);
  60. }
  61. static size_t ceph_vxattrcb_rctime(struct ceph_inode_info *ci, char *val,
  62. size_t size)
  63. {
  64. return snprintf(val, size, "%ld.%ld", (long)ci->i_rctime.tv_sec,
  65. (long)ci->i_rctime.tv_nsec);
  66. }
  67. static struct ceph_vxattr_cb ceph_dir_vxattrs[] = {
  68. { true, "ceph.dir.entries", ceph_vxattrcb_entries},
  69. { true, "ceph.dir.files", ceph_vxattrcb_files},
  70. { true, "ceph.dir.subdirs", ceph_vxattrcb_subdirs},
  71. { true, "ceph.dir.rentries", ceph_vxattrcb_rentries},
  72. { true, "ceph.dir.rfiles", ceph_vxattrcb_rfiles},
  73. { true, "ceph.dir.rsubdirs", ceph_vxattrcb_rsubdirs},
  74. { true, "ceph.dir.rbytes", ceph_vxattrcb_rbytes},
  75. { true, "ceph.dir.rctime", ceph_vxattrcb_rctime},
  76. { true, NULL, NULL }
  77. };
  78. /* files */
  79. static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
  80. size_t size)
  81. {
  82. int ret;
  83. ret = snprintf(val, size,
  84. "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
  85. (unsigned long long)ceph_file_layout_su(ci->i_layout),
  86. (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
  87. (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
  88. if (ceph_file_layout_pg_preferred(ci->i_layout))
  89. ret += snprintf(val + ret, size, "preferred_osd=%lld\n",
  90. (unsigned long long)ceph_file_layout_pg_preferred(
  91. ci->i_layout));
  92. return ret;
  93. }
  94. static struct ceph_vxattr_cb ceph_file_vxattrs[] = {
  95. { true, "ceph.file.layout", ceph_vxattrcb_layout},
  96. /* The following extended attribute name is deprecated */
  97. { true, "ceph.layout", ceph_vxattrcb_layout},
  98. { true, NULL, NULL }
  99. };
  100. static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode)
  101. {
  102. if (S_ISDIR(inode->i_mode))
  103. return ceph_dir_vxattrs;
  104. else if (S_ISREG(inode->i_mode))
  105. return ceph_file_vxattrs;
  106. return NULL;
  107. }
  108. static struct ceph_vxattr_cb *ceph_match_vxattr(struct ceph_vxattr_cb *vxattr,
  109. const char *name)
  110. {
  111. do {
  112. if (strcmp(vxattr->name, name) == 0)
  113. return vxattr;
  114. vxattr++;
  115. } while (vxattr->name);
  116. return NULL;
  117. }
/*
 * Insert or update xattr @name in ci's in-memory index, an rb-tree
 * ordered by name content with name length as tiebreak.
 *
 * @dirty: mark the entry locally modified.
 * @should_free_name/@should_free_val: name/val were kmalloc'ed; the
 *	tree takes ownership and kfrees them on replace/destroy.
 * @newxattr: preallocated node, consumed on insert; freed and set to
 *	NULL when an existing entry is updated in place instead.
 *
 * Callers (ceph_setxattr, __build_xattrs) hold ci->i_ceph_lock.
 * Always returns 0.
 */
static int __set_xattr(struct ceph_inode_info *ci,
		       const char *name, int name_len,
		       const char *val, int val_len,
		       int dirty,
		       int should_free_name, int should_free_val,
		       struct ceph_inode_xattr **newxattr)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int c;
	int new = 0;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		/* compare the common prefix; equal prefixes tie-break on length */
		c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			if (name_len == xattr->name_len)
				break;	/* exact match: update in place */
			else if (name_len < xattr->name_len)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		xattr = NULL;	/* not a match; cleared unless we broke out */
	}

	if (!xattr) {
		/* no existing entry: link in the preallocated node */
		new = 1;
		xattr = *newxattr;
		xattr->name = name;
		xattr->name_len = name_len;
		xattr->should_free_name = should_free_name;
		ci->i_xattrs.count++;
		dout("__set_xattr count=%d\n", ci->i_xattrs.count);
	} else {
		/* updating in place: spare node and old value not needed */
		kfree(*newxattr);
		*newxattr = NULL;
		if (xattr->should_free_val)
			kfree((void *)xattr->val);
		if (should_free_name) {
			/* keep the tree's name buffer; drop the new copy */
			kfree((void *)name);
			name = xattr->name;
		}
		ci->i_xattrs.names_size -= xattr->name_len;
		ci->i_xattrs.vals_size -= xattr->val_len;
	}
	/* account the (possibly replacing) entry's sizes */
	ci->i_xattrs.names_size += name_len;
	ci->i_xattrs.vals_size += val_len;
	if (val)
		xattr->val = val;
	else
		xattr->val = "";	/* normalize NULL to empty value */
	xattr->val_len = val_len;
	xattr->dirty = dirty;
	xattr->should_free_val = (val && should_free_val);
	if (new) {
		rb_link_node(&xattr->node, parent, p);
		rb_insert_color(&xattr->node, &ci->i_xattrs.index);
		dout("__set_xattr_val p=%p\n", p);
	}
	dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
	     ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
	return 0;
}
/*
 * Look up @name in ci's xattr index; returns the entry or NULL.
 * Uses the same (content, then length) ordering as __set_xattr.
 * Caller holds ci->i_ceph_lock.
 */
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
					    const char *name)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int name_len = strlen(name);
	int c;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, xattr->name_len);
		/* matching prefix but longer name sorts to the right */
		if (c == 0 && name_len > xattr->name_len)
			c = 1;
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			dout("__get_xattr %s: found %.*s\n", name,
			     xattr->val_len, xattr->val);
			return xattr;
		}
	}
	dout("__get_xattr %s: not found\n", name);
	return NULL;
}
/*
 * Free an index node, plus its name/value buffers when the node owns
 * them (should_free_*).  Node must already be out of the rb-tree.
 */
static void __free_xattr(struct ceph_inode_xattr *xattr)
{
	BUG_ON(!xattr);
	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);
	kfree(xattr);
}
  224. static int __remove_xattr(struct ceph_inode_info *ci,
  225. struct ceph_inode_xattr *xattr)
  226. {
  227. if (!xattr)
  228. return -EOPNOTSUPP;
  229. rb_erase(&xattr->node, &ci->i_xattrs.index);
  230. if (xattr->should_free_name)
  231. kfree((void *)xattr->name);
  232. if (xattr->should_free_val)
  233. kfree((void *)xattr->val);
  234. ci->i_xattrs.names_size -= xattr->name_len;
  235. ci->i_xattrs.vals_size -= xattr->val_len;
  236. ci->i_xattrs.count--;
  237. kfree(xattr);
  238. return 0;
  239. }
  240. static int __remove_xattr_by_name(struct ceph_inode_info *ci,
  241. const char *name)
  242. {
  243. struct rb_node **p;
  244. struct ceph_inode_xattr *xattr;
  245. int err;
  246. p = &ci->i_xattrs.index.rb_node;
  247. xattr = __get_xattr(ci, name);
  248. err = __remove_xattr(ci, xattr);
  249. return err;
  250. }
  251. static char *__copy_xattr_names(struct ceph_inode_info *ci,
  252. char *dest)
  253. {
  254. struct rb_node *p;
  255. struct ceph_inode_xattr *xattr = NULL;
  256. p = rb_first(&ci->i_xattrs.index);
  257. dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
  258. while (p) {
  259. xattr = rb_entry(p, struct ceph_inode_xattr, node);
  260. memcpy(dest, xattr->name, xattr->name_len);
  261. dest[xattr->name_len] = '\0';
  262. dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
  263. xattr->name_len, ci->i_xattrs.names_size);
  264. dest += xattr->name_len + 1;
  265. p = rb_next(p);
  266. }
  267. return dest;
  268. }
  269. void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
  270. {
  271. struct rb_node *p, *tmp;
  272. struct ceph_inode_xattr *xattr = NULL;
  273. p = rb_first(&ci->i_xattrs.index);
  274. dout("__ceph_destroy_xattrs p=%p\n", p);
  275. while (p) {
  276. xattr = rb_entry(p, struct ceph_inode_xattr, node);
  277. tmp = p;
  278. p = rb_next(tmp);
  279. dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
  280. xattr->name_len, xattr->name);
  281. rb_erase(tmp, &ci->i_xattrs.index);
  282. __free_xattr(xattr);
  283. }
  284. ci->i_xattrs.names_size = 0;
  285. ci->i_xattrs.vals_size = 0;
  286. ci->i_xattrs.index_version = 0;
  287. ci->i_xattrs.count = 0;
  288. ci->i_xattrs.index = RB_ROOT;
  289. }
  290. static int __build_xattrs(struct inode *inode)
  291. __releases(ci->i_ceph_lock)
  292. __acquires(ci->i_ceph_lock)
  293. {
  294. u32 namelen;
  295. u32 numattr = 0;
  296. void *p, *end;
  297. u32 len;
  298. const char *name, *val;
  299. struct ceph_inode_info *ci = ceph_inode(inode);
  300. int xattr_version;
  301. struct ceph_inode_xattr **xattrs = NULL;
  302. int err = 0;
  303. int i;
  304. dout("__build_xattrs() len=%d\n",
  305. ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
  306. if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
  307. return 0; /* already built */
  308. __ceph_destroy_xattrs(ci);
  309. start:
  310. /* updated internal xattr rb tree */
  311. if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
  312. p = ci->i_xattrs.blob->vec.iov_base;
  313. end = p + ci->i_xattrs.blob->vec.iov_len;
  314. ceph_decode_32_safe(&p, end, numattr, bad);
  315. xattr_version = ci->i_xattrs.version;
  316. spin_unlock(&ci->i_ceph_lock);
  317. xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
  318. GFP_NOFS);
  319. err = -ENOMEM;
  320. if (!xattrs)
  321. goto bad_lock;
  322. memset(xattrs, 0, numattr*sizeof(struct ceph_xattr *));
  323. for (i = 0; i < numattr; i++) {
  324. xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
  325. GFP_NOFS);
  326. if (!xattrs[i])
  327. goto bad_lock;
  328. }
  329. spin_lock(&ci->i_ceph_lock);
  330. if (ci->i_xattrs.version != xattr_version) {
  331. /* lost a race, retry */
  332. for (i = 0; i < numattr; i++)
  333. kfree(xattrs[i]);
  334. kfree(xattrs);
  335. goto start;
  336. }
  337. err = -EIO;
  338. while (numattr--) {
  339. ceph_decode_32_safe(&p, end, len, bad);
  340. namelen = len;
  341. name = p;
  342. p += len;
  343. ceph_decode_32_safe(&p, end, len, bad);
  344. val = p;
  345. p += len;
  346. err = __set_xattr(ci, name, namelen, val, len,
  347. 0, 0, 0, &xattrs[numattr]);
  348. if (err < 0)
  349. goto bad;
  350. }
  351. kfree(xattrs);
  352. }
  353. ci->i_xattrs.index_version = ci->i_xattrs.version;
  354. ci->i_xattrs.dirty = false;
  355. return err;
  356. bad_lock:
  357. spin_lock(&ci->i_ceph_lock);
  358. bad:
  359. if (xattrs) {
  360. for (i = 0; i < numattr; i++)
  361. kfree(xattrs[i]);
  362. kfree(xattrs);
  363. }
  364. ci->i_xattrs.names_size = 0;
  365. return err;
  366. }
  367. static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
  368. int val_size)
  369. {
  370. /*
  371. * 4 bytes for the length, and additional 4 bytes per each xattr name,
  372. * 4 bytes per each value
  373. */
  374. int size = 4 + ci->i_xattrs.count*(4 + 4) +
  375. ci->i_xattrs.names_size +
  376. ci->i_xattrs.vals_size;
  377. dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
  378. ci->i_xattrs.count, ci->i_xattrs.names_size,
  379. ci->i_xattrs.vals_size);
  380. if (name_size)
  381. size += 4 + 4 + name_size + val_size;
  382. return size;
  383. }
/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 *
 * Encoding: u32 count, then for each xattr a u32 name length, the
 * name bytes, a u32 value length, and the value bytes.  Callers must
 * have preallocated a blob sized via __get_required_blob_size().
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;
	void *dest;

	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
	if (ci->i_xattrs.dirty) {
		int need = __get_required_blob_size(ci, 0, 0);

		/* the prealloc blob must already be big enough */
		BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);
		p = rb_first(&ci->i_xattrs.index);
		dest = ci->i_xattrs.prealloc_blob->vec.iov_base;
		ceph_encode_32(&dest, ci->i_xattrs.count);
		while (p) {
			xattr = rb_entry(p, struct ceph_inode_xattr, node);
			ceph_encode_32(&dest, xattr->name_len);
			memcpy(dest, xattr->name, xattr->name_len);
			dest += xattr->name_len;
			ceph_encode_32(&dest, xattr->val_len);
			memcpy(dest, xattr->val, xattr->val_len);
			dest += xattr->val_len;
			p = rb_next(p);
		}
		/* adjust buffer len; it may be larger than we need */
		ci->i_xattrs.prealloc_blob->vec.iov_len =
			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
		/* swap the freshly built blob into place */
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = NULL;
		ci->i_xattrs.dirty = false;
		ci->i_xattrs.version++;
	}
}
/*
 * Get one xattr value.  Readonly virtual xattrs (ceph.dir.*,
 * ceph.file.*) are generated from in-core inode state; everything else
 * comes from the xattr index, refreshed from the MDS first when we do
 * not hold CEPH_CAP_XATTR_SHARED or the index is stale.
 *
 * Returns the value length (copying into @value when @size != 0),
 * -ERANGE when @size is too small, or -ENODATA when absent.
 */
ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
		      size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
	int err;
	struct ceph_inode_xattr *xattr;
	struct ceph_vxattr_cb *vxattr = NULL;

	if (!ceph_is_valid_xattr(name))
		return -ENODATA;

	/* let's see if a virtual xattr was requested */
	if (vxattrs)
		vxattr = ceph_match_vxattr(vxattrs, name);

	spin_lock(&ci->i_ceph_lock);
	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		goto get_xattr;	/* cached index is current */
	} else {
		spin_unlock(&ci->i_ceph_lock);
		/* get xattrs from mds (if we don't already have them) */
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&ci->i_ceph_lock);

	/* readonly virtual xattrs are computed locally */
	if (vxattr && vxattr->readonly) {
		err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

get_xattr:
	err = -ENODATA;  /* == ENOATTR */
	xattr = __get_xattr(ci, name);
	if (!xattr) {
		/* fall back to the virtual xattr, if one matched */
		if (vxattr)
			err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = -ERANGE;
	if (size && size < xattr->val_len)
		goto out;

	err = xattr->val_len;
	if (size == 0)
		goto out;	/* size==0: caller only wants the length */

	memcpy(value, xattr->val, xattr->val_len);

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
/*
 * List all xattr names: real names from the index plus the virtual
 * names for this inode type, each NUL-terminated.  Refreshes the
 * index from the MDS first when our caps/version are stale.
 *
 * Returns the total length (copying into @names when @size != 0), or
 * -ERANGE when the buffer is too small.
 */
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
	u32 vir_namelen = 0;
	u32 namelen;
	int err;
	u32 len;
	int i;

	spin_lock(&ci->i_ceph_lock);
	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		goto list_xattr;	/* cached index is current */
	} else {
		spin_unlock(&ci->i_ceph_lock);
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&ci->i_ceph_lock);

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

list_xattr:
	vir_namelen = 0;
	/* include virtual dir xattrs */
	if (vxattrs)
		for (i = 0; vxattrs[i].name; i++)
			vir_namelen += strlen(vxattrs[i].name) + 1;
	/* adding 1 byte per each variable due to the null termination */
	namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
	err = -ERANGE;
	if (size && namelen > size)
		goto out;

	err = namelen;
	if (size == 0)
		goto out;	/* size==0: caller only wants the length */

	names = __copy_xattr_names(ci, names);

	/* virtual xattr names, too */
	if (vxattrs)
		for (i = 0; vxattrs[i].name; i++) {
			len = sprintf(names, "%s", vxattrs[i].name);
			names += len + 1;
		}

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
  526. static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
  527. const char *value, size_t size, int flags)
  528. {
  529. struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
  530. struct inode *inode = dentry->d_inode;
  531. struct ceph_inode_info *ci = ceph_inode(inode);
  532. struct inode *parent_inode;
  533. struct ceph_mds_request *req;
  534. struct ceph_mds_client *mdsc = fsc->mdsc;
  535. int err;
  536. int i, nr_pages;
  537. struct page **pages = NULL;
  538. void *kaddr;
  539. /* copy value into some pages */
  540. nr_pages = calc_pages_for(0, size);
  541. if (nr_pages) {
  542. pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
  543. if (!pages)
  544. return -ENOMEM;
  545. err = -ENOMEM;
  546. for (i = 0; i < nr_pages; i++) {
  547. pages[i] = __page_cache_alloc(GFP_NOFS);
  548. if (!pages[i]) {
  549. nr_pages = i;
  550. goto out;
  551. }
  552. kaddr = kmap(pages[i]);
  553. memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
  554. min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
  555. }
  556. }
  557. dout("setxattr value=%.*s\n", (int)size, value);
  558. /* do request */
  559. req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
  560. USE_AUTH_MDS);
  561. if (IS_ERR(req)) {
  562. err = PTR_ERR(req);
  563. goto out;
  564. }
  565. req->r_inode = inode;
  566. ihold(inode);
  567. req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
  568. req->r_num_caps = 1;
  569. req->r_args.setxattr.flags = cpu_to_le32(flags);
  570. req->r_path2 = kstrdup(name, GFP_NOFS);
  571. req->r_pages = pages;
  572. req->r_num_pages = nr_pages;
  573. req->r_data_len = size;
  574. dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
  575. parent_inode = ceph_get_dentry_parent_inode(dentry);
  576. err = ceph_mdsc_do_request(mdsc, parent_inode, req);
  577. iput(parent_inode);
  578. ceph_mdsc_put_request(req);
  579. dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
  580. out:
  581. if (pages) {
  582. for (i = 0; i < nr_pages; i++)
  583. __free_page(pages[i]);
  584. kfree(pages);
  585. }
  586. return err;
  587. }
/*
 * Set an xattr.  When we hold the CEPH_CAP_XATTR_EXCL cap the update
 * is applied to the local index and marked dirty (flushed to the MDS
 * later via the cap mechanism); otherwise it is sent synchronously.
 */
int ceph_setxattr(struct dentry *dentry, const char *name,
		  const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
	int err;
	int name_len = strlen(name);
	int val_len = size;
	char *newname = NULL;
	char *newval = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int issued;
	int required_blob_size;
	int dirty;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;	/* snapshots are read-only */

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	/* readonly virtual xattrs cannot be set */
	if (vxattrs) {
		struct ceph_vxattr_cb *vxattr =
			ceph_match_vxattr(vxattrs, name);
		if (vxattr && vxattr->readonly)
			return -EOPNOTSUPP;
	}

	/* preallocate memory for xattr name, value, index node */
	err = -ENOMEM;
	newname = kmemdup(name, name_len + 1, GFP_NOFS);
	if (!newname)
		goto out;

	if (val_len) {
		newval = kmalloc(val_len + 1, GFP_NOFS);
		if (!newval)
			goto out;
		memcpy(newval, value, val_len);
		newval[val_len] = '\0';	/* keep the value printable for dout */
	}

	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	if (!xattr)
		goto out;

	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;	/* no exclusive cap: go through the MDS */
	__build_xattrs(inode);

	/* make sure the next cap flush can encode the enlarged xattr set */
	required_blob_size = __get_required_blob_size(ci, name_len, val_len);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob = NULL;

		spin_unlock(&ci->i_ceph_lock);
		dout(" preaallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto out;
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;	/* state may have changed while unlocked */
	}

	dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
	/* newname/newval/xattr are consumed by __set_xattr */
	err = __set_xattr(ci, newname, name_len, newval,
			  val_len, 1, 1, 1, &xattr);
	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;
	spin_unlock(&ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	return err;

do_sync:
	spin_unlock(&ci->i_ceph_lock);
	err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
	kfree(newname);
	kfree(newval);
	kfree(xattr);
	return err;
}
  668. static int ceph_send_removexattr(struct dentry *dentry, const char *name)
  669. {
  670. struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
  671. struct ceph_mds_client *mdsc = fsc->mdsc;
  672. struct inode *inode = dentry->d_inode;
  673. struct inode *parent_inode;
  674. struct ceph_mds_request *req;
  675. int err;
  676. req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
  677. USE_AUTH_MDS);
  678. if (IS_ERR(req))
  679. return PTR_ERR(req);
  680. req->r_inode = inode;
  681. ihold(inode);
  682. req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
  683. req->r_num_caps = 1;
  684. req->r_path2 = kstrdup(name, GFP_NOFS);
  685. parent_inode = ceph_get_dentry_parent_inode(dentry);
  686. err = ceph_mdsc_do_request(mdsc, parent_inode, req);
  687. iput(parent_inode);
  688. ceph_mdsc_put_request(req);
  689. return err;
  690. }
  691. int ceph_removexattr(struct dentry *dentry, const char *name)
  692. {
  693. struct inode *inode = dentry->d_inode;
  694. struct ceph_inode_info *ci = ceph_inode(inode);
  695. struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
  696. int issued;
  697. int err;
  698. int required_blob_size;
  699. int dirty;
  700. if (ceph_snap(inode) != CEPH_NOSNAP)
  701. return -EROFS;
  702. if (!ceph_is_valid_xattr(name))
  703. return -EOPNOTSUPP;
  704. if (vxattrs) {
  705. struct ceph_vxattr_cb *vxattr =
  706. ceph_match_vxattr(vxattrs, name);
  707. if (vxattr && vxattr->readonly)
  708. return -EOPNOTSUPP;
  709. }
  710. err = -ENOMEM;
  711. spin_lock(&ci->i_ceph_lock);
  712. __build_xattrs(inode);
  713. retry:
  714. issued = __ceph_caps_issued(ci, NULL);
  715. dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
  716. if (!(issued & CEPH_CAP_XATTR_EXCL))
  717. goto do_sync;
  718. required_blob_size = __get_required_blob_size(ci, 0, 0);
  719. if (!ci->i_xattrs.prealloc_blob ||
  720. required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
  721. struct ceph_buffer *blob;
  722. spin_unlock(&ci->i_ceph_lock);
  723. dout(" preaallocating new blob size=%d\n", required_blob_size);
  724. blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
  725. if (!blob)
  726. goto out;
  727. spin_lock(&ci->i_ceph_lock);
  728. if (ci->i_xattrs.prealloc_blob)
  729. ceph_buffer_put(ci->i_xattrs.prealloc_blob);
  730. ci->i_xattrs.prealloc_blob = blob;
  731. goto retry;
  732. }
  733. err = __remove_xattr_by_name(ceph_inode(inode), name);
  734. dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
  735. ci->i_xattrs.dirty = true;
  736. inode->i_ctime = CURRENT_TIME;
  737. spin_unlock(&ci->i_ceph_lock);
  738. if (dirty)
  739. __mark_inode_dirty(inode, dirty);
  740. return err;
  741. do_sync:
  742. spin_unlock(&ci->i_ceph_lock);
  743. err = ceph_send_removexattr(dentry, name);
  744. out:
  745. return err;
  746. }