xattr.c

#include "ceph_debug.h"
#include "super.h"
#include "decode.h"

#include <linux/xattr.h>
#include <linux/slab.h>

static bool ceph_is_valid_xattr(const char *name)
{
	return !strncmp(name, XATTR_SECURITY_PREFIX,
			XATTR_SECURITY_PREFIX_LEN) ||
	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}

/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr_cb {
	bool readonly;
	char *name;
	size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
			      size_t size);
};

/* directories */

static size_t ceph_vxattrcb_entries(struct ceph_inode_info *ci, char *val,
				    size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
}

static size_t ceph_vxattrcb_files(struct ceph_inode_info *ci, char *val,
				  size_t size)
{
	return snprintf(val, size, "%lld", ci->i_files);
}

static size_t ceph_vxattrcb_subdirs(struct ceph_inode_info *ci, char *val,
				    size_t size)
{
	return snprintf(val, size, "%lld", ci->i_subdirs);
}

static size_t ceph_vxattrcb_rentries(struct ceph_inode_info *ci, char *val,
				     size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_rfiles(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rfiles);
}

static size_t ceph_vxattrcb_rsubdirs(struct ceph_inode_info *ci, char *val,
				     size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_rbytes(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	return snprintf(val, size, "%lld", ci->i_rbytes);
}

static size_t ceph_vxattrcb_rctime(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	return snprintf(val, size, "%ld.%ld", (long)ci->i_rctime.tv_sec,
			(long)ci->i_rctime.tv_nsec);
}

static struct ceph_vxattr_cb ceph_dir_vxattrs[] = {
	{ true, "user.ceph.dir.entries", ceph_vxattrcb_entries},
	{ true, "user.ceph.dir.files", ceph_vxattrcb_files},
	{ true, "user.ceph.dir.subdirs", ceph_vxattrcb_subdirs},
	{ true, "user.ceph.dir.rentries", ceph_vxattrcb_rentries},
	{ true, "user.ceph.dir.rfiles", ceph_vxattrcb_rfiles},
	{ true, "user.ceph.dir.rsubdirs", ceph_vxattrcb_rsubdirs},
	{ true, "user.ceph.dir.rbytes", ceph_vxattrcb_rbytes},
	{ true, "user.ceph.dir.rctime", ceph_vxattrcb_rctime},
	{ true, NULL, NULL }
};
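
/*
 * These virtual xattrs are read-only and are served from the inode's
 * cached recursive statistics; ceph_listxattr() reports their names
 * alongside the regular xattrs.  A hypothetical usage example, assuming
 * a kernel client mount at /mnt/ceph:
 *
 *	getfattr -n user.ceph.dir.rbytes /mnt/ceph/some/dir
 *	getfattr -n user.ceph.dir.rentries /mnt/ceph/some/dir
 */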

/* files */

static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	int ret;

	ret = snprintf(val, size,
		"chunk_bytes=%llu\nstripe_count=%llu\nobject_size=%llu\n",
		(unsigned long long)ceph_file_layout_su(ci->i_layout),
		(unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
		(unsigned long long)ceph_file_layout_object_size(ci->i_layout));
	if (ceph_file_layout_pg_preferred(ci->i_layout))
		ret += snprintf(val + ret, size - ret, "preferred_osd=%llu\n",
			(unsigned long long)ceph_file_layout_pg_preferred(
				ci->i_layout));
	return ret;
}
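
/*
 * For illustration, the value returned for "user.ceph.layout" is the short
 * newline-separated list built by the snprintf calls above, e.g. (the
 * numbers here are made up):
 *
 *	chunk_bytes=4194304
 *	stripe_count=1
 *	object_size=4194304
 */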

static struct ceph_vxattr_cb ceph_file_vxattrs[] = {
	{ true, "user.ceph.layout", ceph_vxattrcb_layout},
	{ true, NULL, NULL }
};

static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode))
		return ceph_dir_vxattrs;
	else if (S_ISREG(inode->i_mode))
		return ceph_file_vxattrs;
	return NULL;
}

static struct ceph_vxattr_cb *ceph_match_vxattr(struct ceph_vxattr_cb *vxattr,
						const char *name)
{
	do {
		if (strcmp(vxattr->name, name) == 0)
			return vxattr;
		vxattr++;
	} while (vxattr->name);
	return NULL;
}

static int __set_xattr(struct ceph_inode_info *ci,
		       const char *name, int name_len,
		       const char *val, int val_len,
		       int dirty,
		       int should_free_name, int should_free_val,
		       struct ceph_inode_xattr **newxattr)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int c;
	int new = 0;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			if (name_len == xattr->name_len)
				break;
			else if (name_len < xattr->name_len)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		xattr = NULL;
	}

	if (!xattr) {
		new = 1;
		xattr = *newxattr;
		xattr->name = name;
		xattr->name_len = name_len;
		xattr->should_free_name = should_free_name;

		ci->i_xattrs.count++;
		dout("__set_xattr count=%d\n", ci->i_xattrs.count);
	} else {
		kfree(*newxattr);
		*newxattr = NULL;
		if (xattr->should_free_val)
			kfree((void *)xattr->val);

		if (should_free_name) {
			kfree((void *)name);
			name = xattr->name;
		}
		ci->i_xattrs.names_size -= xattr->name_len;
		ci->i_xattrs.vals_size -= xattr->val_len;
	}
	ci->i_xattrs.names_size += name_len;
	ci->i_xattrs.vals_size += val_len;
	if (val)
		xattr->val = val;
	else
		xattr->val = "";
	xattr->val_len = val_len;
	xattr->dirty = dirty;
	xattr->should_free_val = (val && should_free_val);

	if (new) {
		rb_link_node(&xattr->node, parent, p);
		rb_insert_color(&xattr->node, &ci->i_xattrs.index);
		dout("__set_xattr_val p=%p\n", p);
	}

	dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
	     ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);

	return 0;
}

static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
					    const char *name)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int c;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, xattr->name_len);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			dout("__get_xattr %s: found %.*s\n", name,
			     xattr->val_len, xattr->val);
			return xattr;
		}
	}

	dout("__get_xattr %s: not found\n", name);

	return NULL;
}

static void __free_xattr(struct ceph_inode_xattr *xattr)
{
	BUG_ON(!xattr);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	kfree(xattr);
}

static int __remove_xattr(struct ceph_inode_info *ci,
			  struct ceph_inode_xattr *xattr)
{
	if (!xattr)
		return -EOPNOTSUPP;

	rb_erase(&xattr->node, &ci->i_xattrs.index);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	ci->i_xattrs.names_size -= xattr->name_len;
	ci->i_xattrs.vals_size -= xattr->val_len;
	ci->i_xattrs.count--;
	kfree(xattr);

	return 0;
}

static int __remove_xattr_by_name(struct ceph_inode_info *ci,
				  const char *name)
{
	struct ceph_inode_xattr *xattr;
	int err;

	xattr = __get_xattr(ci, name);
	err = __remove_xattr(ci, xattr);
	return err;
}

static char *__copy_xattr_names(struct ceph_inode_info *ci,
				char *dest)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);
	dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		memcpy(dest, xattr->name, xattr->name_len);
		dest[xattr->name_len] = '\0';

		dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
		     xattr->name_len, ci->i_xattrs.names_size);

		dest += xattr->name_len + 1;
		p = rb_next(p);
	}

	return dest;
}

void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
	struct rb_node *p, *tmp;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);
	dout("__ceph_destroy_xattrs p=%p\n", p);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		tmp = p;
		p = rb_next(tmp);
		dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
		     xattr->name_len, xattr->name);
		rb_erase(tmp, &ci->i_xattrs.index);

		__free_xattr(xattr);
	}

	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.index_version = 0;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.index = RB_ROOT;
}
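
/*
 * Decode the xattr blob received from the MDS into the in-memory rb-tree.
 * The caller holds inode->i_lock; note that this function drops and
 * reacquires it around the allocations, and restarts if the xattr version
 * changed in the meantime.
 */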

static int __build_xattrs(struct inode *inode)
{
	u32 namelen;
	u32 numattr = 0;
	void *p, *end;
	u32 len;
	const char *name, *val;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int xattr_version;
	struct ceph_inode_xattr **xattrs = NULL;
	int err = 0;
	int i;

	dout("__build_xattrs() len=%d\n",
	     ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);

	if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
		return 0; /* already built */

	__ceph_destroy_xattrs(ci);

start:
	/* update the internal xattr rb tree from the blob */
	if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
		p = ci->i_xattrs.blob->vec.iov_base;
		end = p + ci->i_xattrs.blob->vec.iov_len;
		ceph_decode_32_safe(&p, end, numattr, bad);
		xattr_version = ci->i_xattrs.version;
		spin_unlock(&inode->i_lock);

		xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
				 GFP_NOFS);
		err = -ENOMEM;
		if (!xattrs)
			goto bad_lock;
		for (i = 0; i < numattr; i++) {
			xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
					    GFP_NOFS);
			if (!xattrs[i])
				goto bad_lock;
		}

		spin_lock(&inode->i_lock);
		if (ci->i_xattrs.version != xattr_version) {
			/* lost a race, retry */
			for (i = 0; i < numattr; i++)
				kfree(xattrs[i]);
			kfree(xattrs);
			goto start;
		}
		err = -EIO;
		while (numattr--) {
			ceph_decode_32_safe(&p, end, len, bad);
			namelen = len;
			name = p;
			p += len;
			ceph_decode_32_safe(&p, end, len, bad);
			val = p;
			p += len;

			err = __set_xattr(ci, name, namelen, val, len,
					  0, 0, 0, &xattrs[numattr]);

			if (err < 0)
				goto bad;
		}
		kfree(xattrs);
	}
	ci->i_xattrs.index_version = ci->i_xattrs.version;
	ci->i_xattrs.dirty = false;

	return err;
bad_lock:
	spin_lock(&inode->i_lock);
bad:
	if (xattrs) {
		for (i = 0; i < numattr; i++)
			kfree(xattrs[i]);
		kfree(xattrs);
	}
	ci->i_xattrs.names_size = 0;
	return err;
}

static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
				    int val_size)
{
	/*
	 * 4 bytes for the xattr count, plus, for each xattr, 4 bytes for
	 * the name length and 4 bytes for the value length, in addition
	 * to the name and value bytes themselves.
	 */
	int size = 4 + ci->i_xattrs.count*(4 + 4) +
		ci->i_xattrs.names_size +
		ci->i_xattrs.vals_size;
	dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
	     ci->i_xattrs.count, ci->i_xattrs.names_size,
	     ci->i_xattrs.vals_size);

	if (name_size)
		size += 4 + 4 + name_size + val_size;

	return size;
}
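
/*
 * A worked example (hypothetical numbers): with two cached xattrs whose
 * names total 18 bytes and values total 8 bytes, the blob needs
 * 4 + 2*(4 + 4) + 18 + 8 = 46 bytes; adding a new 9-byte name with a
 * 5-byte value raises that by 4 + 4 + 9 + 5 = 22 bytes.
 */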

/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 */
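
/*
 * The resulting blob layout matches what __build_xattrs() decodes: a
 * 32-bit xattr count, followed by (32-bit name length, name bytes,
 * 32-bit value length, value bytes) for each xattr.
 */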

void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;
	void *dest;

	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
	if (ci->i_xattrs.dirty) {
		int need = __get_required_blob_size(ci, 0, 0);

		BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

		p = rb_first(&ci->i_xattrs.index);
		dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

		ceph_encode_32(&dest, ci->i_xattrs.count);
		while (p) {
			xattr = rb_entry(p, struct ceph_inode_xattr, node);

			ceph_encode_32(&dest, xattr->name_len);
			memcpy(dest, xattr->name, xattr->name_len);
			dest += xattr->name_len;
			ceph_encode_32(&dest, xattr->val_len);
			memcpy(dest, xattr->val, xattr->val_len);
			dest += xattr->val_len;

			p = rb_next(p);
		}

		/* adjust buffer len; it may be larger than we need */
		ci->i_xattrs.prealloc_blob->vec.iov_len =
			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);

		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = NULL;
		ci->i_xattrs.dirty = false;
	}
}
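
/*
 * getxattr: first try the xattrs already cached under the
 * CEPH_CAP_XATTR_SHARED capability; otherwise fetch them from the MDS
 * with ceph_do_getattr(), rebuild the rb-tree, and fall back to a
 * matching virtual xattr if the name is not found there.
 */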

ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
		      size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
	int err;
	struct ceph_inode_xattr *xattr;
	struct ceph_vxattr_cb *vxattr = NULL;

	if (!ceph_is_valid_xattr(name))
		return -ENODATA;

	/* let's see if a virtual xattr was requested */
	if (vxattrs)
		vxattr = ceph_match_vxattr(vxattrs, name);

	spin_lock(&inode->i_lock);
	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		goto get_xattr;
	} else {
		spin_unlock(&inode->i_lock);
		/* get xattrs from mds (if we don't already have them) */
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&inode->i_lock);

	if (vxattr && vxattr->readonly) {
		err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

get_xattr:
	err = -ENODATA;  /* == ENOATTR */
	xattr = __get_xattr(ci, name);
	if (!xattr) {
		if (vxattr)
			err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = -ERANGE;
	if (size && size < xattr->val_len)
		goto out;

	err = xattr->val_len;
	if (size == 0)
		goto out;

	memcpy(value, xattr->val, xattr->val_len);

out:
	spin_unlock(&inode->i_lock);
	return err;
}

ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
	u32 vir_namelen = 0;
	u32 namelen;
	int err;
	u32 len;
	int i;

	spin_lock(&inode->i_lock);
	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		goto list_xattr;
	} else {
		spin_unlock(&inode->i_lock);
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&inode->i_lock);

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

list_xattr:
	vir_namelen = 0;
	/* include virtual dir xattrs */
	if (vxattrs)
		for (i = 0; vxattrs[i].name; i++)
			vir_namelen += strlen(vxattrs[i].name) + 1;
	/* plus one byte per name for its trailing NUL */
	namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
	err = -ERANGE;
	if (size && namelen > size)
		goto out;

	err = namelen;
	if (size == 0)
		goto out;

	names = __copy_xattr_names(ci, names);

	/* virtual xattr names, too */
	if (vxattrs)
		for (i = 0; vxattrs[i].name; i++) {
			len = sprintf(names, "%s", vxattrs[i].name);
			names += len + 1;
		}

out:
	spin_unlock(&inode->i_lock);
	return err;
}

static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
			      const char *value, size_t size, int flags)
{
	struct ceph_client *client = ceph_client(dentry->d_sb);
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode = dentry->d_parent->d_inode;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = &client->mdsc;
	int err;
	int i, nr_pages;
	struct page **pages = NULL;
	void *kaddr;

	/* copy value into some pages */
	nr_pages = calc_pages_for(0, size);
	if (nr_pages) {
		pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
		if (!pages)
			return -ENOMEM;
		err = -ENOMEM;
		for (i = 0; i < nr_pages; i++) {
			pages[i] = __page_cache_alloc(GFP_NOFS);
			if (!pages[i]) {
				nr_pages = i;
				goto out;
			}
			kaddr = kmap(pages[i]);
			memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
			       min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
			kunmap(pages[i]);
		}
	}

	dout("setxattr value=%.*s\n", (int)size, value);

	/* do request */
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = igrab(inode);
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
	req->r_num_caps = 1;
	req->r_args.setxattr.flags = cpu_to_le32(flags);
	req->r_path2 = kstrdup(name, GFP_NOFS);

	req->r_pages = pages;
	req->r_num_pages = nr_pages;
	req->r_data_len = size;

	dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	ceph_mdsc_put_request(req);
	dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

out:
	if (pages) {
		for (i = 0; i < nr_pages; i++)
			__free_page(pages[i]);
		kfree(pages);
	}
	return err;
}
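
/*
 * setxattr: if we hold the CEPH_CAP_XATTR_EXCL capability, update the
 * cached xattrs in place and mark the cap dirty so the change is flushed
 * back later; otherwise fall back to a synchronous SETXATTR request to
 * the MDS via ceph_sync_setxattr().
 */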

int ceph_setxattr(struct dentry *dentry, const char *name,
		  const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
	int err;
	int name_len = strlen(name);
	int val_len = size;
	char *newname = NULL;
	char *newval = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int issued;
	int required_blob_size;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	if (vxattrs) {
		struct ceph_vxattr_cb *vxattr =
			ceph_match_vxattr(vxattrs, name);
		if (vxattr && vxattr->readonly)
			return -EOPNOTSUPP;
	}

	/* preallocate memory for xattr name, value, index node */
	err = -ENOMEM;
	newname = kmalloc(name_len + 1, GFP_NOFS);
	if (!newname)
		goto out;
	memcpy(newname, name, name_len + 1);

	if (val_len) {
		newval = kmalloc(val_len + 1, GFP_NOFS);
		if (!newval)
			goto out;
		memcpy(newval, value, val_len);
		newval[val_len] = '\0';
	}

	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	if (!xattr)
		goto out;

	spin_lock(&inode->i_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, name_len, val_len);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob = NULL;

		spin_unlock(&inode->i_lock);
		dout(" preallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto out;
		spin_lock(&inode->i_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
	err = __set_xattr(ci, newname, name_len, newval,
			  val_len, 1, 1, 1, &xattr);
	__ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;
	spin_unlock(&inode->i_lock);

	return err;

do_sync:
	spin_unlock(&inode->i_lock);
	err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
	kfree(newname);
	kfree(newval);
	kfree(xattr);
	return err;
}

static int ceph_send_removexattr(struct dentry *dentry, const char *name)
{
	struct ceph_client *client = ceph_client(dentry->d_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct inode *inode = dentry->d_inode;
	struct inode *parent_inode = dentry->d_parent->d_inode;
	struct ceph_mds_request *req;
	int err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = igrab(inode);
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
	req->r_num_caps = 1;
	req->r_path2 = kstrdup(name, GFP_NOFS);

	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	ceph_mdsc_put_request(req);
	return err;
}

int ceph_removexattr(struct dentry *dentry, const char *name)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
	int issued;
	int err;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	if (vxattrs) {
		struct ceph_vxattr_cb *vxattr =
			ceph_match_vxattr(vxattrs, name);
		if (vxattr && vxattr->readonly)
			return -EOPNOTSUPP;
	}

	spin_lock(&inode->i_lock);
	__build_xattrs(inode);
	issued = __ceph_caps_issued(ci, NULL);
	dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;

	err = __remove_xattr_by_name(ceph_inode(inode), name);
	__ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;

	spin_unlock(&inode->i_lock);

	return err;
do_sync:
	spin_unlock(&inode->i_lock);
	err = ceph_send_removexattr(dentry, name);
	return err;
}