xattr.c

#include "ceph_debug.h"

#include "super.h"
#include "decode.h"

#include <linux/xattr.h>
#include <linux/slab.h>

static bool ceph_is_valid_xattr(const char *name)
{
        return !strncmp(name, "ceph.", 5) ||
               !strncmp(name, XATTR_SECURITY_PREFIX,
                        XATTR_SECURITY_PREFIX_LEN) ||
               !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
               !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}

/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr_cb {
        bool readonly;
        char *name;
        size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
                              size_t size);
};

/* directories */

static size_t ceph_vxattrcb_entries(struct ceph_inode_info *ci, char *val,
                                    size_t size)
{
        return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
}

static size_t ceph_vxattrcb_files(struct ceph_inode_info *ci, char *val,
                                  size_t size)
{
        return snprintf(val, size, "%lld", ci->i_files);
}

static size_t ceph_vxattrcb_subdirs(struct ceph_inode_info *ci, char *val,
                                    size_t size)
{
        return snprintf(val, size, "%lld", ci->i_subdirs);
}

static size_t ceph_vxattrcb_rentries(struct ceph_inode_info *ci, char *val,
                                     size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_rfiles(struct ceph_inode_info *ci, char *val,
                                   size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rfiles);
}

static size_t ceph_vxattrcb_rsubdirs(struct ceph_inode_info *ci, char *val,
                                     size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rsubdirs);
}

static size_t ceph_vxattrcb_rbytes(struct ceph_inode_info *ci, char *val,
                                   size_t size)
{
        return snprintf(val, size, "%lld", ci->i_rbytes);
}

static size_t ceph_vxattrcb_rctime(struct ceph_inode_info *ci, char *val,
                                   size_t size)
{
        return snprintf(val, size, "%ld.%ld", (long)ci->i_rctime.tv_sec,
                        (long)ci->i_rctime.tv_nsec);
}

static struct ceph_vxattr_cb ceph_dir_vxattrs[] = {
        { true, "ceph.dir.entries", ceph_vxattrcb_entries},
        { true, "ceph.dir.files", ceph_vxattrcb_files},
        { true, "ceph.dir.subdirs", ceph_vxattrcb_subdirs},
        { true, "ceph.dir.rentries", ceph_vxattrcb_rentries},
        { true, "ceph.dir.rfiles", ceph_vxattrcb_rfiles},
        { true, "ceph.dir.rsubdirs", ceph_vxattrcb_rsubdirs},
        { true, "ceph.dir.rbytes", ceph_vxattrcb_rbytes},
        { true, "ceph.dir.rctime", ceph_vxattrcb_rctime},
        { true, NULL, NULL }
};

/* files */

static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
                                   size_t size)
{
        int ret;

        ret = snprintf(val, size,
                "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
                (unsigned long long)ceph_file_layout_su(ci->i_layout),
                (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
                (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
        if (ceph_file_layout_pg_preferred(ci->i_layout))
                ret += snprintf(val + ret, size - ret, "preferred_osd=%lld\n",
                        (unsigned long long)ceph_file_layout_pg_preferred(
                                ci->i_layout));
        return ret;
}

static struct ceph_vxattr_cb ceph_file_vxattrs[] = {
        { true, "ceph.layout", ceph_vxattrcb_layout},
        { true, NULL, NULL }
};

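/* Pick the vxattr table that applies to this inode type, if any. */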
static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode)
{
        if (S_ISDIR(inode->i_mode))
                return ceph_dir_vxattrs;
        else if (S_ISREG(inode->i_mode))
                return ceph_file_vxattrs;
        return NULL;
}

static struct ceph_vxattr_cb *ceph_match_vxattr(struct ceph_vxattr_cb *vxattr,
                                                const char *name)
{
        do {
                if (strcmp(vxattr->name, name) == 0)
                        return vxattr;
                vxattr++;
        } while (vxattr->name);
        return NULL;
}

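/*
 * Insert (or update) a single xattr in the per-inode rb-tree index.
 * On insert the preallocated node in *newxattr is consumed; on update
 * it is freed.  The should_free_* flags record whether the name/value
 * buffers are owned by the tree.  Caller must hold inode->i_lock.
 */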
static int __set_xattr(struct ceph_inode_info *ci,
                       const char *name, int name_len,
                       const char *val, int val_len,
                       int dirty,
                       int should_free_name, int should_free_val,
                       struct ceph_inode_xattr **newxattr)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_xattr *xattr = NULL;
        int c;
        int new = 0;

        p = &ci->i_xattrs.index.rb_node;
        while (*p) {
                parent = *p;
                xattr = rb_entry(parent, struct ceph_inode_xattr, node);
                c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
                        p = &(*p)->rb_right;
                else {
                        if (name_len == xattr->name_len)
                                break;
                        else if (name_len < xattr->name_len)
                                p = &(*p)->rb_left;
                        else
                                p = &(*p)->rb_right;
                }
                xattr = NULL;
        }

        if (!xattr) {
                new = 1;
                xattr = *newxattr;
                xattr->name = name;
                xattr->name_len = name_len;
                xattr->should_free_name = should_free_name;

                ci->i_xattrs.count++;
                dout("__set_xattr count=%d\n", ci->i_xattrs.count);
        } else {
                kfree(*newxattr);
                *newxattr = NULL;
                if (xattr->should_free_val)
                        kfree((void *)xattr->val);

                if (should_free_name) {
                        kfree((void *)name);
                        name = xattr->name;
                }
                ci->i_xattrs.names_size -= xattr->name_len;
                ci->i_xattrs.vals_size -= xattr->val_len;
        }
        ci->i_xattrs.names_size += name_len;
        ci->i_xattrs.vals_size += val_len;
        if (val)
                xattr->val = val;
        else
                xattr->val = "";

        xattr->val_len = val_len;
        xattr->dirty = dirty;
        xattr->should_free_val = (val && should_free_val);

        if (new) {
                rb_link_node(&xattr->node, parent, p);
                rb_insert_color(&xattr->node, &ci->i_xattrs.index);
                dout("__set_xattr_val p=%p\n", p);
        }

        dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
             ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);

        return 0;
}

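/* Look up an xattr by name in the rb-tree index; returns NULL if absent. */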
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
                                            const char *name)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_xattr *xattr = NULL;
        int c;

        p = &ci->i_xattrs.index.rb_node;
        while (*p) {
                parent = *p;
                xattr = rb_entry(parent, struct ceph_inode_xattr, node);
                c = strncmp(name, xattr->name, xattr->name_len);
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
                        p = &(*p)->rb_right;
                else {
                        dout("__get_xattr %s: found %.*s\n", name,
                             xattr->val_len, xattr->val);
                        return xattr;
                }
        }

        dout("__get_xattr %s: not found\n", name);

        return NULL;
}

static void __free_xattr(struct ceph_inode_xattr *xattr)
{
        BUG_ON(!xattr);

        if (xattr->should_free_name)
                kfree((void *)xattr->name);
        if (xattr->should_free_val)
                kfree((void *)xattr->val);

        kfree(xattr);
}

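/*
 * Unlink an xattr from the rb-tree index and free it, updating the
 * cached name/value size accounting.  Returns -EOPNOTSUPP if xattr is
 * NULL (i.e. the name was not present).
 */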
static int __remove_xattr(struct ceph_inode_info *ci,
                          struct ceph_inode_xattr *xattr)
{
        if (!xattr)
                return -EOPNOTSUPP;

        rb_erase(&xattr->node, &ci->i_xattrs.index);

        if (xattr->should_free_name)
                kfree((void *)xattr->name);
        if (xattr->should_free_val)
                kfree((void *)xattr->val);

        ci->i_xattrs.names_size -= xattr->name_len;
        ci->i_xattrs.vals_size -= xattr->val_len;
        ci->i_xattrs.count--;

        kfree(xattr);

        return 0;
}

static int __remove_xattr_by_name(struct ceph_inode_info *ci,
                                  const char *name)
{
        struct ceph_inode_xattr *xattr;
        int err;

        xattr = __get_xattr(ci, name);
        err = __remove_xattr(ci, xattr);
        return err;
}

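/*
 * Copy all xattr names into dest as consecutive NUL-terminated
 * strings; returns the advanced destination pointer.
 */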
static char *__copy_xattr_names(struct ceph_inode_info *ci,
                                char *dest)
{
        struct rb_node *p;
        struct ceph_inode_xattr *xattr = NULL;

        p = rb_first(&ci->i_xattrs.index);
        dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);

        while (p) {
                xattr = rb_entry(p, struct ceph_inode_xattr, node);
                memcpy(dest, xattr->name, xattr->name_len);
                dest[xattr->name_len] = '\0';

                dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
                     xattr->name_len, ci->i_xattrs.names_size);

                dest += xattr->name_len + 1;
                p = rb_next(p);
        }

        return dest;
}

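/* Tear down the entire rb-tree index and reset the xattr bookkeeping. */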
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
        struct rb_node *p, *tmp;
        struct ceph_inode_xattr *xattr = NULL;

        p = rb_first(&ci->i_xattrs.index);

        dout("__ceph_destroy_xattrs p=%p\n", p);

        while (p) {
                xattr = rb_entry(p, struct ceph_inode_xattr, node);
                tmp = p;
                p = rb_next(tmp);
                dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
                     xattr->name_len, xattr->name);
                rb_erase(tmp, &ci->i_xattrs.index);

                __free_xattr(xattr);
        }

        ci->i_xattrs.names_size = 0;
        ci->i_xattrs.vals_size = 0;
        ci->i_xattrs.index_version = 0;
        ci->i_xattrs.count = 0;
        ci->i_xattrs.index = RB_ROOT;
}

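/*
 * Decode the xattr blob received from the MDS into the rb-tree index.
 * Node allocation happens with inode->i_lock dropped; if the xattr
 * version changed in the meantime we lost a race and start over.
 */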
static int __build_xattrs(struct inode *inode)
        __releases(inode->i_lock)
        __acquires(inode->i_lock)
{
        u32 namelen;
        u32 numattr = 0;
        void *p, *end;
        u32 len;
        const char *name, *val;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int xattr_version;
        struct ceph_inode_xattr **xattrs = NULL;
        int err = 0;
        int i;

        dout("__build_xattrs() len=%d\n",
             ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);

        if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
                return 0; /* already built */

        __ceph_destroy_xattrs(ci);

start:
        /* updated internal xattr rb tree */
        if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
                p = ci->i_xattrs.blob->vec.iov_base;
                end = p + ci->i_xattrs.blob->vec.iov_len;
                ceph_decode_32_safe(&p, end, numattr, bad);
                xattr_version = ci->i_xattrs.version;
                spin_unlock(&inode->i_lock);

                xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
                                 GFP_NOFS);
                err = -ENOMEM;
                if (!xattrs)
                        goto bad_lock;
                memset(xattrs, 0, numattr*sizeof(struct ceph_inode_xattr *));
                for (i = 0; i < numattr; i++) {
                        xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
                                            GFP_NOFS);
                        if (!xattrs[i])
                                goto bad_lock;
                }

                spin_lock(&inode->i_lock);
                if (ci->i_xattrs.version != xattr_version) {
                        /* lost a race, retry */
                        for (i = 0; i < numattr; i++)
                                kfree(xattrs[i]);
                        kfree(xattrs);
                        goto start;
                }
                err = -EIO;
                while (numattr--) {
                        ceph_decode_32_safe(&p, end, len, bad);
                        namelen = len;
                        name = p;
                        p += len;
                        ceph_decode_32_safe(&p, end, len, bad);
                        val = p;
                        p += len;

                        err = __set_xattr(ci, name, namelen, val, len,
                                          0, 0, 0, &xattrs[numattr]);
                        if (err < 0)
                                goto bad;
                }
                kfree(xattrs);
        }
        ci->i_xattrs.index_version = ci->i_xattrs.version;
        ci->i_xattrs.dirty = false;

        return err;
bad_lock:
        spin_lock(&inode->i_lock);
bad:
        if (xattrs) {
                for (i = 0; i < numattr; i++)
                        kfree(xattrs[i]);
                kfree(xattrs);
        }
        ci->i_xattrs.names_size = 0;
        return err;
}

static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
                                    int val_size)
{
        /*
         * 4 bytes for the xattr count, plus 4 bytes per xattr for the
         * name length and 4 bytes per xattr for the value length.
         */
        int size = 4 + ci->i_xattrs.count*(4 + 4) +
                ci->i_xattrs.names_size +
                ci->i_xattrs.vals_size;
        dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
             ci->i_xattrs.count, ci->i_xattrs.names_size,
             ci->i_xattrs.vals_size);

        if (name_size)
                size += 4 + 4 + name_size + val_size;

        return size;
}

/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
        struct rb_node *p;
        struct ceph_inode_xattr *xattr = NULL;
        void *dest;

        dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
        if (ci->i_xattrs.dirty) {
                int need = __get_required_blob_size(ci, 0, 0);

                BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

                p = rb_first(&ci->i_xattrs.index);
                dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

                ceph_encode_32(&dest, ci->i_xattrs.count);
                while (p) {
                        xattr = rb_entry(p, struct ceph_inode_xattr, node);

                        ceph_encode_32(&dest, xattr->name_len);
                        memcpy(dest, xattr->name, xattr->name_len);
                        dest += xattr->name_len;
                        ceph_encode_32(&dest, xattr->val_len);
                        memcpy(dest, xattr->val, xattr->val_len);
                        dest += xattr->val_len;

                        p = rb_next(p);
                }

                /* adjust buffer len; it may be larger than we need */
                ci->i_xattrs.prealloc_blob->vec.iov_len =
                        dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

                if (ci->i_xattrs.blob)
                        ceph_buffer_put(ci->i_xattrs.blob);

                ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
                ci->i_xattrs.prealloc_blob = NULL;
                ci->i_xattrs.dirty = false;
                ci->i_xattrs.version++;
        }
}

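/*
 * Fetch one xattr value.  If we do not hold the XATTR_SHARED cap with
 * an up-to-date index, ask the MDS first; virtual xattrs are served
 * from the callbacks above.
 */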
ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
                      size_t size)
{
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
        int err;
        struct ceph_inode_xattr *xattr;
        struct ceph_vxattr_cb *vxattr = NULL;

        if (!ceph_is_valid_xattr(name))
                return -ENODATA;

        /* let's see if a virtual xattr was requested */
        if (vxattrs)
                vxattr = ceph_match_vxattr(vxattrs, name);

        spin_lock(&inode->i_lock);
        dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);

        if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
                goto get_xattr;
        } else {
                spin_unlock(&inode->i_lock);
                /* get xattrs from mds (if we don't already have them) */
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
                if (err)
                        return err;
        }

        spin_lock(&inode->i_lock);

        if (vxattr && vxattr->readonly) {
                err = vxattr->getxattr_cb(ci, value, size);
                goto out;
        }

        err = __build_xattrs(inode);
        if (err < 0)
                goto out;

get_xattr:
        err = -ENODATA;  /* == ENOATTR */
        xattr = __get_xattr(ci, name);
        if (!xattr) {
                if (vxattr)
                        err = vxattr->getxattr_cb(ci, value, size);
                goto out;
        }

        err = -ERANGE;
        if (size && size < xattr->val_len)
                goto out;

        err = xattr->val_len;
        if (size == 0)
                goto out;

        memcpy(value, xattr->val, xattr->val_len);

out:
        spin_unlock(&inode->i_lock);
        return err;
}

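/*
 * Return the concatenated, NUL-separated list of xattr names,
 * including the applicable virtual xattr names.  A zero size only
 * queries the required buffer length.
 */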
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
        u32 vir_namelen = 0;
        u32 namelen;
        int err;
        u32 len;
        int i;

        spin_lock(&inode->i_lock);
        dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);

        if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
                goto list_xattr;
        } else {
                spin_unlock(&inode->i_lock);
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
                if (err)
                        return err;
        }

        spin_lock(&inode->i_lock);

        err = __build_xattrs(inode);
        if (err < 0)
                goto out;

list_xattr:
        vir_namelen = 0;
        /* include virtual dir xattrs */
        if (vxattrs)
                for (i = 0; vxattrs[i].name; i++)
                        vir_namelen += strlen(vxattrs[i].name) + 1;
        /* add 1 byte per name for the trailing NUL */
        namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
        err = -ERANGE;
        if (size && namelen > size)
                goto out;

        err = namelen;
        if (size == 0)
                goto out;

        names = __copy_xattr_names(ci, names);

        /* virtual xattr names, too */
        if (vxattrs)
                for (i = 0; vxattrs[i].name; i++) {
                        len = sprintf(names, "%s", vxattrs[i].name);
                        names += len + 1;
                }

out:
        spin_unlock(&inode->i_lock);
        return err;
}

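/*
 * Set an xattr synchronously via an MDS request: the value is copied
 * into pages and sent as the request's data payload.
 */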
static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
                              const char *value, size_t size, int flags)
{
        struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct inode *parent_inode = dentry->d_parent->d_inode;
        struct ceph_mds_request *req;
        struct ceph_mds_client *mdsc = &client->mdsc;
        int err;
        int i, nr_pages;
        struct page **pages = NULL;
        void *kaddr;

        /* copy value into some pages */
        nr_pages = calc_pages_for(0, size);
        if (nr_pages) {
                pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
                if (!pages)
                        return -ENOMEM;
                err = -ENOMEM;
                for (i = 0; i < nr_pages; i++) {
                        pages[i] = __page_cache_alloc(GFP_NOFS);
                        if (!pages[i]) {
                                nr_pages = i;
                                goto out;
                        }
                        kaddr = kmap(pages[i]);
                        memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
                               min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
                }
        }

        dout("setxattr value=%.*s\n", (int)size, value);

        /* do request */
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
                                       USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = igrab(inode);
        req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
        req->r_num_caps = 1;
        req->r_args.setxattr.flags = cpu_to_le32(flags);
        req->r_path2 = kstrdup(name, GFP_NOFS);

        req->r_pages = pages;
        req->r_num_pages = nr_pages;
        req->r_data_len = size;

        dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        ceph_mdsc_put_request(req);
        dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

out:
        if (pages) {
                for (i = 0; i < nr_pages; i++)
                        __free_page(pages[i]);
                kfree(pages);
        }
        return err;
}

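/*
 * Set an xattr.  With the XATTR_EXCL cap we update the local index and
 * mark the inode's xattrs dirty; otherwise fall back to a synchronous
 * MDS request.
 */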
int ceph_setxattr(struct dentry *dentry, const char *name,
                  const void *value, size_t size, int flags)
{
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
        int err;
        int name_len = strlen(name);
        int val_len = size;
        char *newname = NULL;
        char *newval = NULL;
        struct ceph_inode_xattr *xattr = NULL;
        int issued;
        int required_blob_size;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

        if (!ceph_is_valid_xattr(name))
                return -EOPNOTSUPP;

        if (vxattrs) {
                struct ceph_vxattr_cb *vxattr =
                        ceph_match_vxattr(vxattrs, name);
                if (vxattr && vxattr->readonly)
                        return -EOPNOTSUPP;
        }

        /* preallocate memory for xattr name, value, index node */
        err = -ENOMEM;
        newname = kmalloc(name_len + 1, GFP_NOFS);
        if (!newname)
                goto out;
        memcpy(newname, name, name_len + 1);

        if (val_len) {
                newval = kmalloc(val_len + 1, GFP_NOFS);
                if (!newval)
                        goto out;
                memcpy(newval, value, val_len);
                newval[val_len] = '\0';
        }

        xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
        if (!xattr)
                goto out;

        spin_lock(&inode->i_lock);
retry:
        issued = __ceph_caps_issued(ci, NULL);
        if (!(issued & CEPH_CAP_XATTR_EXCL))
                goto do_sync;
        __build_xattrs(inode);

        required_blob_size = __get_required_blob_size(ci, name_len, val_len);

        if (!ci->i_xattrs.prealloc_blob ||
            required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
                struct ceph_buffer *blob = NULL;

                spin_unlock(&inode->i_lock);
                dout(" preallocating new blob size=%d\n", required_blob_size);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
                if (!blob)
                        goto out;
                spin_lock(&inode->i_lock);
                if (ci->i_xattrs.prealloc_blob)
                        ceph_buffer_put(ci->i_xattrs.prealloc_blob);
                ci->i_xattrs.prealloc_blob = blob;
                goto retry;
        }

        dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
        err = __set_xattr(ci, newname, name_len, newval,
                          val_len, 1, 1, 1, &xattr);
        __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
        spin_unlock(&inode->i_lock);

        return err;

do_sync:
        spin_unlock(&inode->i_lock);
        err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
        kfree(newname);
        kfree(newval);
        kfree(xattr);
        return err;
}

static int ceph_send_removexattr(struct dentry *dentry, const char *name)
{
        struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
        struct ceph_mds_client *mdsc = &client->mdsc;
        struct inode *inode = dentry->d_inode;
        struct inode *parent_inode = dentry->d_parent->d_inode;
        struct ceph_mds_request *req;
        int err;

        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
                                       USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_inode = igrab(inode);
        req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
        req->r_num_caps = 1;
        req->r_path2 = kstrdup(name, GFP_NOFS);

        err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        ceph_mdsc_put_request(req);
        return err;
}

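/*
 * Remove an xattr, either locally (XATTR_EXCL cap held) or via a
 * synchronous MDS request.
 */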
int ceph_removexattr(struct dentry *dentry, const char *name)
{
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
        int issued;
        int err;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

        if (!ceph_is_valid_xattr(name))
                return -EOPNOTSUPP;

        if (vxattrs) {
                struct ceph_vxattr_cb *vxattr =
                        ceph_match_vxattr(vxattrs, name);
                if (vxattr && vxattr->readonly)
                        return -EOPNOTSUPP;
        }

        spin_lock(&inode->i_lock);
        __build_xattrs(inode);
        issued = __ceph_caps_issued(ci, NULL);
        dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));

        if (!(issued & CEPH_CAP_XATTR_EXCL))
                goto do_sync;

        err = __remove_xattr_by_name(ceph_inode(inode), name);
        __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;

        spin_unlock(&inode->i_lock);

        return err;
do_sync:
        spin_unlock(&inode->i_lock);
        err = ceph_send_removexattr(dentry, name);
        return err;
}