xattr.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893
  1. #include <linux/ceph/ceph_debug.h>
  2. #include "super.h"
  3. #include "mds_client.h"
  4. #include <linux/ceph/decode.h>
  5. #include <linux/xattr.h>
  6. #include <linux/slab.h>
  7. #define XATTR_CEPH_PREFIX "ceph."
  8. #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
  9. static bool ceph_is_valid_xattr(const char *name)
  10. {
  11. return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
  12. !strncmp(name, XATTR_SECURITY_PREFIX,
  13. XATTR_SECURITY_PREFIX_LEN) ||
  14. !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
  15. !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
  16. }
/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr {
	char *name;			/* full name, e.g. "ceph.dir.entries" */
	/* formats the value into val (snprintf-style); returns length */
	size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
			      size_t size);
	bool readonly;			/* true: rejected by setxattr/removexattr */
};
  27. /* directories */
  28. static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
  29. size_t size)
  30. {
  31. return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
  32. }
  33. static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
  34. size_t size)
  35. {
  36. return snprintf(val, size, "%lld", ci->i_files);
  37. }
  38. static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
  39. size_t size)
  40. {
  41. return snprintf(val, size, "%lld", ci->i_subdirs);
  42. }
  43. static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
  44. size_t size)
  45. {
  46. return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
  47. }
  48. static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
  49. size_t size)
  50. {
  51. return snprintf(val, size, "%lld", ci->i_rfiles);
  52. }
  53. static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
  54. size_t size)
  55. {
  56. return snprintf(val, size, "%lld", ci->i_rsubdirs);
  57. }
  58. static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
  59. size_t size)
  60. {
  61. return snprintf(val, size, "%lld", ci->i_rbytes);
  62. }
  63. static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
  64. size_t size)
  65. {
  66. return snprintf(val, size, "%ld.%ld", (long)ci->i_rctime.tv_sec,
  67. (long)ci->i_rctime.tv_nsec);
  68. }
/* Build the full name "ceph.<type>.<name>" at compile time. */
#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
/*
 * Table-entry helper: binds the vxattr name to its formatting callback
 * ceph_vxattrcb_<type>_<name>.  All generated entries are read-only.
 */
#define XATTR_NAME_CEPH(_type, _name) \
	{ \
		.name = CEPH_XATTR_NAME(_type, _name), \
		.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \
		.readonly = true, \
	}
/* Virtual xattrs available on directory inodes. */
static struct ceph_vxattr ceph_dir_vxattrs[] = {
	XATTR_NAME_CEPH(dir, entries),
	XATTR_NAME_CEPH(dir, files),
	XATTR_NAME_CEPH(dir, subdirs),
	XATTR_NAME_CEPH(dir, rentries),
	XATTR_NAME_CEPH(dir, rfiles),
	XATTR_NAME_CEPH(dir, rsubdirs),
	XATTR_NAME_CEPH(dir, rbytes),
	XATTR_NAME_CEPH(dir, rctime),
	{ 0 }	/* Required table terminator (NULL .name ends iteration) */
};
  87. /* files */
  88. static size_t ceph_vxattrcb_file_layout(struct ceph_inode_info *ci, char *val,
  89. size_t size)
  90. {
  91. int ret;
  92. ret = snprintf(val, size,
  93. "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
  94. (unsigned long long)ceph_file_layout_su(ci->i_layout),
  95. (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
  96. (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
  97. if (ceph_file_layout_pg_preferred(ci->i_layout))
  98. ret += snprintf(val + ret, size, "preferred_osd=%lld\n",
  99. (unsigned long long)ceph_file_layout_pg_preferred(
  100. ci->i_layout));
  101. return ret;
  102. }
/* Virtual xattrs available on regular-file inodes. */
static struct ceph_vxattr ceph_file_vxattrs[] = {
	XATTR_NAME_CEPH(file, layout),
	/* The following extended attribute name is deprecated */
	{
		/* legacy alias "ceph.layout" for "ceph.file.layout" */
		.name = XATTR_CEPH_PREFIX "layout",
		.getxattr_cb = ceph_vxattrcb_file_layout,
		.readonly = true,
	},
	{ 0 }	/* Required table terminator (NULL .name ends iteration) */
};
  113. static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
  114. {
  115. if (S_ISDIR(inode->i_mode))
  116. return ceph_dir_vxattrs;
  117. else if (S_ISREG(inode->i_mode))
  118. return ceph_file_vxattrs;
  119. return NULL;
  120. }
  121. static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
  122. const char *name)
  123. {
  124. struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);
  125. if (vxattr) {
  126. while (vxattr->name) {
  127. if (!strcmp(vxattr->name, name))
  128. return vxattr;
  129. vxattr++;
  130. }
  131. }
  132. return NULL;
  133. }
/*
 * Insert or update the xattr @name in ci->i_xattrs.index, an rb-tree
 * ordered by name (comparison below: common prefix first, then length
 * as tie-breaker).  *newxattr is a node preallocated by the caller; on
 * a fresh insert it becomes the tree node, on an update it is freed
 * and *newxattr is set to NULL.  @should_free_name/@should_free_val
 * record whether this code takes ownership of (and must later kfree)
 * the name/value buffers.  Callers hold ci->i_ceph_lock (see
 * ceph_setxattr / __build_xattrs).  Always returns 0.
 */
static int __set_xattr(struct ceph_inode_info *ci,
		       const char *name, int name_len,
		       const char *val, int val_len,
		       int dirty,
		       int should_free_name, int should_free_val,
		       struct ceph_inode_xattr **newxattr)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int c;
	int new = 0;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		/* compare only up to the shorter name... */
		c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			/* ...equal prefix: break ties by length */
			if (name_len == xattr->name_len)
				break;	/* exact match: update in place */
			else if (name_len < xattr->name_len)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		/* keep xattr NULL unless we broke out on an exact match */
		xattr = NULL;
	}
	if (!xattr) {
		/* no existing node: consume the caller's preallocation */
		new = 1;
		xattr = *newxattr;
		xattr->name = name;
		xattr->name_len = name_len;
		xattr->should_free_name = should_free_name;
		ci->i_xattrs.count++;
		dout("__set_xattr count=%d\n", ci->i_xattrs.count);
	} else {
		/* updating existing node: preallocation is not needed */
		kfree(*newxattr);
		*newxattr = NULL;
		if (xattr->should_free_val)
			kfree((void *)xattr->val);
		if (should_free_name) {
			/* duplicate name buffer: keep the old one */
			kfree((void *)name);
			name = xattr->name;
		}
		/* back out old sizes; re-added for the new value below */
		ci->i_xattrs.names_size -= xattr->name_len;
		ci->i_xattrs.vals_size -= xattr->val_len;
	}
	ci->i_xattrs.names_size += name_len;
	ci->i_xattrs.vals_size += val_len;
	if (val)
		xattr->val = val;
	else
		xattr->val = "";	/* never leave val NULL */
	xattr->val_len = val_len;
	xattr->dirty = dirty;
	/* only own the value buffer if one was actually supplied */
	xattr->should_free_val = (val && should_free_val);
	if (new) {
		rb_link_node(&xattr->node, parent, p);
		rb_insert_color(&xattr->node, &ci->i_xattrs.index);
		dout("__set_xattr_val p=%p\n", p);
	}
	dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
	     ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
	return 0;
}
/*
 * Look up @name in the in-memory xattr rb-tree; returns the node or
 * NULL.  Uses the same ordering as __set_xattr: strncmp over the
 * stored name's length, with a longer search name sorting after a
 * stored prefix of it (the c == 0 adjustment below).  A search name
 * that is a strict prefix of the stored name compares less via its
 * NUL byte inside strncmp.  Callers hold ci->i_ceph_lock.
 */
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
					    const char *name)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int name_len = strlen(name);
	int c;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, xattr->name_len);
		/* stored name is a prefix of @name: @name sorts after it */
		if (c == 0 && name_len > xattr->name_len)
			c = 1;
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			dout("__get_xattr %s: found %.*s\n", name,
			     xattr->val_len, xattr->val);
			return xattr;
		}
	}
	dout("__get_xattr %s: not found\n", name);
	return NULL;
}
/*
 * Free one detached xattr node plus whichever of its name/value
 * buffers it owns (per the should_free_* flags set by __set_xattr).
 * The node must already be out of the rb-tree.
 */
static void __free_xattr(struct ceph_inode_xattr *xattr)
{
	BUG_ON(!xattr);
	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);
	kfree(xattr);
}
  240. static int __remove_xattr(struct ceph_inode_info *ci,
  241. struct ceph_inode_xattr *xattr)
  242. {
  243. if (!xattr)
  244. return -EOPNOTSUPP;
  245. rb_erase(&xattr->node, &ci->i_xattrs.index);
  246. if (xattr->should_free_name)
  247. kfree((void *)xattr->name);
  248. if (xattr->should_free_val)
  249. kfree((void *)xattr->val);
  250. ci->i_xattrs.names_size -= xattr->name_len;
  251. ci->i_xattrs.vals_size -= xattr->val_len;
  252. ci->i_xattrs.count--;
  253. kfree(xattr);
  254. return 0;
  255. }
  256. static int __remove_xattr_by_name(struct ceph_inode_info *ci,
  257. const char *name)
  258. {
  259. struct rb_node **p;
  260. struct ceph_inode_xattr *xattr;
  261. int err;
  262. p = &ci->i_xattrs.index.rb_node;
  263. xattr = __get_xattr(ci, name);
  264. err = __remove_xattr(ci, xattr);
  265. return err;
  266. }
  267. static char *__copy_xattr_names(struct ceph_inode_info *ci,
  268. char *dest)
  269. {
  270. struct rb_node *p;
  271. struct ceph_inode_xattr *xattr = NULL;
  272. p = rb_first(&ci->i_xattrs.index);
  273. dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
  274. while (p) {
  275. xattr = rb_entry(p, struct ceph_inode_xattr, node);
  276. memcpy(dest, xattr->name, xattr->name_len);
  277. dest[xattr->name_len] = '\0';
  278. dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
  279. xattr->name_len, ci->i_xattrs.names_size);
  280. dest += xattr->name_len + 1;
  281. p = rb_next(p);
  282. }
  283. return dest;
  284. }
  285. void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
  286. {
  287. struct rb_node *p, *tmp;
  288. struct ceph_inode_xattr *xattr = NULL;
  289. p = rb_first(&ci->i_xattrs.index);
  290. dout("__ceph_destroy_xattrs p=%p\n", p);
  291. while (p) {
  292. xattr = rb_entry(p, struct ceph_inode_xattr, node);
  293. tmp = p;
  294. p = rb_next(tmp);
  295. dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
  296. xattr->name_len, xattr->name);
  297. rb_erase(tmp, &ci->i_xattrs.index);
  298. __free_xattr(xattr);
  299. }
  300. ci->i_xattrs.names_size = 0;
  301. ci->i_xattrs.vals_size = 0;
  302. ci->i_xattrs.index_version = 0;
  303. ci->i_xattrs.count = 0;
  304. ci->i_xattrs.index = RB_ROOT;
  305. }
/*
 * Decode the MDS-supplied xattr blob (ci->i_xattrs.blob) into the
 * in-memory rb-tree.  No-op if the tree is already current
 * (index_version >= version).  Because the node allocations may
 * sleep, i_ceph_lock is dropped around them (hence the __releases/
 * __acquires annotations); if another thread updated the blob in the
 * meantime, the whole decode is retried from "start:".
 * Returns 0 on success, -ENOMEM/-EIO on failure.
 */
static int __build_xattrs(struct inode *inode)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	u32 namelen;
	u32 numattr = 0;
	void *p, *end;
	u32 len;
	const char *name, *val;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int xattr_version;
	struct ceph_inode_xattr **xattrs = NULL;
	int err = 0;
	int i;

	dout("__build_xattrs() len=%d\n",
	     ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
	if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
		return 0; /* already built */
	/* rebuild from scratch: drop whatever is cached now */
	__ceph_destroy_xattrs(ci);
start:
	/* updated internal xattr rb tree */
	if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
		p = ci->i_xattrs.blob->vec.iov_base;
		end = p + ci->i_xattrs.blob->vec.iov_len;
		ceph_decode_32_safe(&p, end, numattr, bad);
		xattr_version = ci->i_xattrs.version;
		/* allocations below may sleep; drop the spinlock */
		spin_unlock(&ci->i_ceph_lock);
		/*
		 * NOTE(review): sizeof(struct ceph_xattr *) vs the
		 * struct ceph_inode_xattr * actually stored — harmless
		 * (all object pointers are the same size) but
		 * misleading; kcalloc also already zeroes, making the
		 * memset below redundant.
		 */
		xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
				 GFP_NOFS);
		err = -ENOMEM;
		if (!xattrs)
			goto bad_lock;
		memset(xattrs, 0, numattr*sizeof(struct ceph_xattr *));
		for (i = 0; i < numattr; i++) {
			xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
					    GFP_NOFS);
			if (!xattrs[i])
				goto bad_lock;	/* later slots are NULL; kfree(NULL) is ok */
		}
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.version != xattr_version) {
			/* lost a race, retry */
			for (i = 0; i < numattr; i++)
				kfree(xattrs[i]);
			kfree(xattrs);
			goto start;
		}
		err = -EIO;
		while (numattr--) {
			/* each entry: u32 name len, name, u32 val len, val */
			ceph_decode_32_safe(&p, end, len, bad);
			namelen = len;
			name = p;
			p += len;
			ceph_decode_32_safe(&p, end, len, bad);
			val = p;
			p += len;
			/* node ownership passes to the tree (or is freed) */
			err = __set_xattr(ci, name, namelen, val, len,
					  0, 0, 0, &xattrs[numattr]);
			if (err < 0)
				goto bad;
		}
		/* the node array itself is no longer needed */
		kfree(xattrs);
	}
	ci->i_xattrs.index_version = ci->i_xattrs.version;
	ci->i_xattrs.dirty = false;
	/*
	 * NOTE(review): if the decoded count was 0, the loop never runs
	 * and err is still -EIO here — looks like it should be 0;
	 * confirm whether an empty xattr set can reach this path.
	 */
	return err;
bad_lock:
	spin_lock(&ci->i_ceph_lock);
bad:
	if (xattrs) {
		/* __set_xattr NULLed consumed slots, so this is safe */
		for (i = 0; i < numattr; i++)
			kfree(xattrs[i]);
		kfree(xattrs);
	}
	ci->i_xattrs.names_size = 0;
	return err;
}
  383. static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
  384. int val_size)
  385. {
  386. /*
  387. * 4 bytes for the length, and additional 4 bytes per each xattr name,
  388. * 4 bytes per each value
  389. */
  390. int size = 4 + ci->i_xattrs.count*(4 + 4) +
  391. ci->i_xattrs.names_size +
  392. ci->i_xattrs.vals_size;
  393. dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
  394. ci->i_xattrs.count, ci->i_xattrs.names_size,
  395. ci->i_xattrs.vals_size);
  396. if (name_size)
  397. size += 4 + 4 + name_size + val_size;
  398. return size;
  399. }
/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.
 */
void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;
	void *dest;

	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
	if (ci->i_xattrs.dirty) {
		int need = __get_required_blob_size(ci, 0, 0);

		/* caller must have preallocated a big-enough blob */
		BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

		p = rb_first(&ci->i_xattrs.index);
		dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

		/* wire format: u32 count, then len-prefixed name/value pairs */
		ceph_encode_32(&dest, ci->i_xattrs.count);
		while (p) {
			xattr = rb_entry(p, struct ceph_inode_xattr, node);
			ceph_encode_32(&dest, xattr->name_len);
			memcpy(dest, xattr->name, xattr->name_len);
			dest += xattr->name_len;
			ceph_encode_32(&dest, xattr->val_len);
			memcpy(dest, xattr->val, xattr->val_len);
			dest += xattr->val_len;
			p = rb_next(p);
		}

		/* adjust buffer len; it may be larger than we need */
		ci->i_xattrs.prealloc_blob->vec.iov_len =
			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

		/* swap the fresh encoding in; drop the old blob's ref */
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = NULL;
		ci->i_xattrs.dirty = false;
		ci->i_xattrs.version++;
	}
}
/*
 * getxattr entry point.  Serves the value from the cached rb-tree
 * when we hold CEPH_CAP_XATTR_SHARED and the tree is current;
 * otherwise fetches xattrs from the MDS first.  Virtual xattrs
 * (ceph.*) are formatted on the fly.  With size == 0, only the
 * required length is returned (standard getxattr probing).
 * Returns the value length, or -ENODATA / -ERANGE / a getattr error.
 */
ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
		      size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;
	struct ceph_inode_xattr *xattr;
	struct ceph_vxattr *vxattr = NULL;

	if (!ceph_is_valid_xattr(name))
		return -ENODATA;

	/* let's see if a virtual xattr was requested */
	vxattr = ceph_match_vxattr(inode, name);

	spin_lock(&ci->i_ceph_lock);
	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);
	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		/* cache is authoritative: skip the MDS round trip */
		goto get_xattr;
	} else {
		spin_unlock(&ci->i_ceph_lock);
		/* get xattrs from mds (if we don't already have them) */
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&ci->i_ceph_lock);

	/*
	 * NOTE(review): this readonly-vxattr shortcut only runs on the
	 * slow path; the fast path above relies on the !xattr fallback
	 * below instead — confirm both paths are meant to be equivalent.
	 */
	if (vxattr && vxattr->readonly) {
		err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	/* decode the blob into the rb-tree before lookup */
	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

get_xattr:
	err = -ENODATA;  /* == ENOATTR */
	xattr = __get_xattr(ci, name);
	if (!xattr) {
		/* not a real xattr; fall back to the virtual one, if any */
		if (vxattr)
			err = vxattr->getxattr_cb(ci, value, size);
		goto out;
	}

	err = -ERANGE;
	if (size && size < xattr->val_len)
		goto out;

	err = xattr->val_len;
	if (size == 0)
		goto out;	/* size probe: report length only */

	memcpy(value, xattr->val, xattr->val_len);

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
/*
 * listxattr entry point.  Emits all cached xattr names plus the
 * virtual xattr names for this inode type, each NUL-terminated, into
 * @names.  With size == 0 only the total length is returned.
 * Fetches xattrs from the MDS first unless the cache is current under
 * CEPH_CAP_XATTR_SHARED.  Returns total length or -ERANGE / error.
 */
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
	u32 vir_namelen = 0;
	u32 namelen;
	int err;
	u32 len;
	int i;

	spin_lock(&ci->i_ceph_lock);
	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);
	if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
	    (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
		/* cache is authoritative: skip the MDS round trip */
		goto list_xattr;
	} else {
		spin_unlock(&ci->i_ceph_lock);
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
		if (err)
			return err;
	}

	spin_lock(&ci->i_ceph_lock);

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

list_xattr:
	vir_namelen = 0;
	/* include virtual dir xattrs */
	if (vxattrs)
		for (i = 0; vxattrs[i].name; i++)
			vir_namelen += strlen(vxattrs[i].name) + 1;
	/* adding 1 byte per each variable due to the null termination */
	namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
	err = -ERANGE;
	if (size && namelen > size)
		goto out;

	err = namelen;
	if (size == 0)
		goto out;	/* size probe: report length only */

	/* real xattr names first... */
	names = __copy_xattr_names(ci, names);

	/* virtual xattr names, too */
	if (vxattrs)
		for (i = 0; vxattrs[i].name; i++) {
			len = sprintf(names, "%s", vxattrs[i].name);
			names += len + 1;
		}

out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}
  540. static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
  541. const char *value, size_t size, int flags)
  542. {
  543. struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
  544. struct inode *inode = dentry->d_inode;
  545. struct ceph_inode_info *ci = ceph_inode(inode);
  546. struct inode *parent_inode;
  547. struct ceph_mds_request *req;
  548. struct ceph_mds_client *mdsc = fsc->mdsc;
  549. int err;
  550. int i, nr_pages;
  551. struct page **pages = NULL;
  552. void *kaddr;
  553. /* copy value into some pages */
  554. nr_pages = calc_pages_for(0, size);
  555. if (nr_pages) {
  556. pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
  557. if (!pages)
  558. return -ENOMEM;
  559. err = -ENOMEM;
  560. for (i = 0; i < nr_pages; i++) {
  561. pages[i] = __page_cache_alloc(GFP_NOFS);
  562. if (!pages[i]) {
  563. nr_pages = i;
  564. goto out;
  565. }
  566. kaddr = kmap(pages[i]);
  567. memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
  568. min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
  569. }
  570. }
  571. dout("setxattr value=%.*s\n", (int)size, value);
  572. /* do request */
  573. req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
  574. USE_AUTH_MDS);
  575. if (IS_ERR(req)) {
  576. err = PTR_ERR(req);
  577. goto out;
  578. }
  579. req->r_inode = inode;
  580. ihold(inode);
  581. req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
  582. req->r_num_caps = 1;
  583. req->r_args.setxattr.flags = cpu_to_le32(flags);
  584. req->r_path2 = kstrdup(name, GFP_NOFS);
  585. req->r_pages = pages;
  586. req->r_num_pages = nr_pages;
  587. req->r_data_len = size;
  588. dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
  589. parent_inode = ceph_get_dentry_parent_inode(dentry);
  590. err = ceph_mdsc_do_request(mdsc, parent_inode, req);
  591. iput(parent_inode);
  592. ceph_mdsc_put_request(req);
  593. dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
  594. out:
  595. if (pages) {
  596. for (i = 0; i < nr_pages; i++)
  597. __free_page(pages[i]);
  598. kfree(pages);
  599. }
  600. return err;
  601. }
/*
 * setxattr entry point.  With CEPH_CAP_XATTR_EXCL we update the local
 * cache and mark the cap dirty (writeback happens later); otherwise
 * we fall back to a synchronous MDS request.  Name, value and tree
 * node are preallocated before taking i_ceph_lock because those
 * allocations may sleep.  Returns 0 or a negative errno.
 */
int ceph_setxattr(struct dentry *dentry, const char *name,
		  const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;
	int name_len = strlen(name);
	int val_len = size;
	char *newname = NULL;
	char *newval = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int issued;
	int required_blob_size;
	int dirty;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;	/* snapshots are immutable */

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	/* read-only virtual xattrs cannot be set */
	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr && vxattr->readonly)
		return -EOPNOTSUPP;

	/* preallocate memory for xattr name, value, index node */
	err = -ENOMEM;
	newname = kmemdup(name, name_len + 1, GFP_NOFS);
	if (!newname)
		goto out;

	if (val_len) {
		newval = kmemdup(value, val_len, GFP_NOFS);
		if (!newval)
			goto out;
	}

	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	if (!xattr)
		goto out;

	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;	/* no exclusive cap: go through the MDS */
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, name_len, val_len);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob = NULL;

		/* must drop the spinlock to allocate; then recheck */
		spin_unlock(&ci->i_ceph_lock);
		dout(" preaallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto out;	/* err is still -ENOMEM here */
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
	/* ownership of newname/newval/xattr passes to the tree here */
	err = __set_xattr(ci, newname, name_len, newval,
			  val_len, 1, 1, 1, &xattr);
	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;
	spin_unlock(&ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	return err;	/* buffers consumed: do NOT fall to out */

do_sync:
	spin_unlock(&ci->i_ceph_lock);
	err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
	/* sync/error paths: the preallocations were never consumed */
	kfree(newname);
	kfree(newval);
	kfree(xattr);
	return err;
}
  677. static int ceph_send_removexattr(struct dentry *dentry, const char *name)
  678. {
  679. struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
  680. struct ceph_mds_client *mdsc = fsc->mdsc;
  681. struct inode *inode = dentry->d_inode;
  682. struct inode *parent_inode;
  683. struct ceph_mds_request *req;
  684. int err;
  685. req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
  686. USE_AUTH_MDS);
  687. if (IS_ERR(req))
  688. return PTR_ERR(req);
  689. req->r_inode = inode;
  690. ihold(inode);
  691. req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
  692. req->r_num_caps = 1;
  693. req->r_path2 = kstrdup(name, GFP_NOFS);
  694. parent_inode = ceph_get_dentry_parent_inode(dentry);
  695. err = ceph_mdsc_do_request(mdsc, parent_inode, req);
  696. iput(parent_inode);
  697. ceph_mdsc_put_request(req);
  698. return err;
  699. }
/*
 * removexattr entry point.  Mirrors ceph_setxattr: with
 * CEPH_CAP_XATTR_EXCL the removal is applied to the local cache and
 * the cap marked dirty; otherwise a synchronous RMXATTR goes to the
 * MDS.  Returns 0, -EOPNOTSUPP (invalid/readonly/missing name),
 * -EROFS on snapshots, or -ENOMEM.
 */
int ceph_removexattr(struct dentry *dentry, const char *name)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued;
	int err;
	int required_blob_size;
	int dirty;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;	/* snapshots are immutable */

	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;

	/* read-only virtual xattrs cannot be removed */
	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr && vxattr->readonly)
		return -EOPNOTSUPP;

	err = -ENOMEM;	/* default result for the alloc-failure path */
	spin_lock(&ci->i_ceph_lock);
	__build_xattrs(inode);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (!(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;	/* no exclusive cap: go through the MDS */

	required_blob_size = __get_required_blob_size(ci, 0, 0);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		/* must drop the spinlock to allocate; then recheck */
		spin_unlock(&ci->i_ceph_lock);
		dout(" preaallocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto out;	/* returns the -ENOMEM set above */
		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.prealloc_blob)
			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __remove_xattr_by_name(ceph_inode(inode), name);
	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
	ci->i_xattrs.dirty = true;
	inode->i_ctime = CURRENT_TIME;

	spin_unlock(&ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	return err;
do_sync:
	spin_unlock(&ci->i_ceph_lock);
	err = ceph_send_removexattr(dentry, name);
out:
	return err;
}