xattr.c
/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Copyright (C) Christoph Hellwig, 2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_debug.h"
#include "jfs_dinode.h"
#include "jfs_extent.h"
#include "jfs_metapage.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"

/*
 * jfs_xattr.c: extended attribute service
 *
 * Overall design --
 *
 * Format:
 *
 *   Extended attribute lists (jfs_ea_list) consist of an overall size (32 bit
 *   value) and a variable (0 or more) number of extended attribute
 *   entries.  Each extended attribute entry (jfs_ea) is a <name,value> double
 *   where <name> is constructed from a null-terminated ascii string
 *   (1 ... 255 bytes in the name) and <value> is arbitrary 8 bit data
 *   (1 ... 65535 bytes).  The in-memory format is
 *
 *   0       1        2        4                4 + namelen + 1
 *   +-------+--------+--------+----------------+-------------------+
 *   | Flags | Name   | Value  | Name String \0 | Data . . . .      |
 *   |       | Length | Length |                |                   |
 *   +-------+--------+--------+----------------+-------------------+
 *
 *   A jfs_ea_list then is structured as
 *
 *   0            4                   4 + EA_SIZE(ea1)
 *   +------------+-------------------+--------------------+-----
 *   | Overall EA | First FEA Element | Second FEA Element | .....
 *   | List Size  |                   |                    |
 *   +------------+-------------------+--------------------+-----
 *
 * On-disk:
 *
 *   FEALISTs are stored on disk using blocks allocated by dbAlloc() and
 *   written directly.  An EA list may be in-lined in the inode if there is
 *   sufficient room available.
 */
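
/*
 * Illustrative sketch (not from the original source, based only on the
 * diagram above): assuming the 1-byte flag, 1-byte name length and 2-byte
 * value length shown there, an entry for the name "foo" with the 3-byte
 * value "bar" would occupy 4 + 3 + 1 + 3 = 11 bytes, and a list holding
 * only that entry would carry an overall list size of 4 + 11 = 15 bytes.
 */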

struct ea_buffer {
        int flag;               /* Indicates what storage xattr points to */
        int max_size;           /* largest xattr that fits in current buffer */
        dxd_t new_ea;           /* dxd to replace ea when modifying xattr */
        struct metapage *mp;    /* metapage containing ea list */
        struct jfs_ea_list *xattr;      /* buffer containing ea list */
};

/*
 * ea_buffer.flag values
 */
#define EA_INLINE       0x0001
#define EA_EXTENT       0x0002
#define EA_NEW          0x0004
#define EA_MALLOC       0x0008

/* Namespaces */
#define XATTR_SYSTEM_PREFIX "system."
#define XATTR_SYSTEM_PREFIX_LEN (sizeof (XATTR_SYSTEM_PREFIX) - 1)

#define XATTR_USER_PREFIX "user."
#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)

#define XATTR_OS2_PREFIX "os2."
#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1)

/* XATTR_SECURITY_PREFIX is defined in include/linux/xattr.h */
#define XATTR_SECURITY_PREFIX_LEN (sizeof (XATTR_SECURITY_PREFIX) - 1)

#define XATTR_TRUSTED_PREFIX "trusted."
#define XATTR_TRUSTED_PREFIX_LEN (sizeof (XATTR_TRUSTED_PREFIX) - 1)

/*
 * These three routines are used to recognize on-disk extended attributes
 * that are in a recognized namespace.  If the attribute is not recognized,
 * "os2." is prepended to the name
 */
static inline int is_os2_xattr(struct jfs_ea *ea)
{
        /*
         * Check for "system."
         */
        if ((ea->namelen >= XATTR_SYSTEM_PREFIX_LEN) &&
            !strncmp(ea->name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
                return FALSE;
        /*
         * Check for "user."
         */
        if ((ea->namelen >= XATTR_USER_PREFIX_LEN) &&
            !strncmp(ea->name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
                return FALSE;
        /*
         * Check for "security."
         */
        if ((ea->namelen >= XATTR_SECURITY_PREFIX_LEN) &&
            !strncmp(ea->name, XATTR_SECURITY_PREFIX,
                     XATTR_SECURITY_PREFIX_LEN))
                return FALSE;
        /*
         * Check for "trusted."
         */
        if ((ea->namelen >= XATTR_TRUSTED_PREFIX_LEN) &&
            !strncmp(ea->name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
                return FALSE;
        /*
         * Add any other valid namespace prefixes here
         */

        /*
         * We assume it's OS/2's flat namespace
         */
        return TRUE;
}

static inline int name_size(struct jfs_ea *ea)
{
        if (is_os2_xattr(ea))
                return ea->namelen + XATTR_OS2_PREFIX_LEN;
        else
                return ea->namelen;
}

static inline int copy_name(char *buffer, struct jfs_ea *ea)
{
        int len = ea->namelen;

        if (is_os2_xattr(ea)) {
                memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN);
                buffer += XATTR_OS2_PREFIX_LEN;
                len += XATTR_OS2_PREFIX_LEN;
        }
        memcpy(buffer, ea->name, ea->namelen);
        buffer[ea->namelen] = 0;

        return len;
}
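
/*
 * Example (illustrative, not from the original source): an on-disk EA
 * named "COMMENTS" matches none of the prefixes above, so name_size()
 * reports 8 + 4 = 12 bytes and copy_name() surfaces the name to listxattr
 * callers as "os2.COMMENTS", while an EA already named "user.comment" is
 * passed through unchanged.
 */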

/* Forward references */
static void ea_release(struct inode *inode, struct ea_buffer *ea_buf);

/*
 * NAME: ea_write_inline
 *
 * FUNCTION: Attempt to write an EA inline if area is available
 *
 * PRE CONDITIONS:
 *      Already verified that the specified EA is small enough to fit inline
 *
 * PARAMETERS:
 *      ip      - Inode pointer
 *      ealist  - EA list pointer
 *      size    - size of ealist in bytes
 *      ea      - dxd_t structure to be filled in with necessary EA information
 *                if we successfully copy the EA inline
 *
 * NOTES:
 *      Checks if the inode's inline area is available. If so, copies EA inline
 *      and sets <ea> fields appropriately. Otherwise, returns failure, EA will
 *      have to be put into an extent.
 *
 * RETURNS: 0 for successful copy to inline area; -1 if area not available
 */
static int ea_write_inline(struct inode *ip, struct jfs_ea_list *ealist,
                           int size, dxd_t * ea)
{
        struct jfs_inode_info *ji = JFS_IP(ip);

        /*
         * Make sure we have an EA -- the NULL EA list is valid, but you
         * can't copy it!
         */
        if (ealist && size > sizeof (struct jfs_ea_list)) {
                assert(size <= sizeof (ji->i_inline_ea));

                /*
                 * See if the space is available or if it is already being
                 * used for an inline EA.
                 */
                if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE))
                        return -EPERM;

                DXDsize(ea, size);
                DXDlength(ea, 0);
                DXDaddress(ea, 0);
                memcpy(ji->i_inline_ea, ealist, size);
                ea->flag = DXD_INLINE;
                ji->mode2 &= ~INLINEEA;
        } else {
                ea->flag = 0;
                DXDsize(ea, 0);
                DXDlength(ea, 0);
                DXDaddress(ea, 0);

                /* Free up INLINE area */
                if (ji->ea.flag & DXD_INLINE)
                        ji->mode2 |= INLINEEA;
        }

        return 0;
}

/*
 * NAME: ea_write
 *
 * FUNCTION: Write an EA for an inode
 *
 * PRE CONDITIONS: EA has been verified
 *
 * PARAMETERS:
 *      ip      - Inode pointer
 *      ealist  - EA list pointer
 *      size    - size of ealist in bytes
 *      ea      - dxd_t structure to be filled in appropriately with where the
 *                EA was copied
 *
 * NOTES: Will write EA inline if able to, otherwise allocates blocks for an
 *      extent and synchronously writes it to those blocks.
 *
 * RETURNS: 0 for success; Anything else indicates failure
 */
static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
                    dxd_t * ea)
{
        struct super_block *sb = ip->i_sb;
        struct jfs_inode_info *ji = JFS_IP(ip);
        struct jfs_sb_info *sbi = JFS_SBI(sb);
        int nblocks;
        s64 blkno;
        int rc = 0, i;
        char *cp;
        s32 nbytes, nb;
        s32 bytes_to_write;
        struct metapage *mp;

        /*
         * Quick check to see if this is an in-linable EA.  Short EAs
         * and empty EAs are all in-linable, provided the space exists.
         */
        if (!ealist || size <= sizeof (ji->i_inline_ea)) {
                if (!ea_write_inline(ip, ealist, size, ea))
                        return 0;
        }

        /* figure out how many blocks we need */
        nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;

        /* Allocate new blocks to quota. */
        if (DQUOT_ALLOC_BLOCK(ip, nblocks)) {
                return -EDQUOT;
        }

        rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
        if (rc) {
                /*Rollback quota allocation. */
                DQUOT_FREE_BLOCK(ip, nblocks);
                return rc;
        }

        /*
         * Now have nblocks worth of storage to stuff into the FEALIST.
         * loop over the FEALIST copying data into the buffer one page at
         * a time.
         */
        cp = (char *) ealist;
        nbytes = size;
        for (i = 0; i < nblocks; i += sbi->nbperpage) {
                /*
                 * Determine how many bytes for this request, and round up to
                 * the nearest aggregate block size
                 */
                nb = min(PSIZE, nbytes);
                bytes_to_write =
                    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
                    << sb->s_blocksize_bits;

                if (!(mp = get_metapage(ip, blkno + i, bytes_to_write, 1))) {
                        rc = -EIO;
                        goto failed;
                }

                memcpy(mp->data, cp, nb);

                /*
                 * We really need a way to propagate errors for
                 * forced writes like this one.  --hch
                 *
                 * (__write_metapage => release_metapage => flush_metapage)
                 */
#ifdef _JFS_FIXME
                if ((rc = flush_metapage(mp))) {
                        /*
                         * the write failed -- this means that the buffer
                         * is still assigned and the blocks are not being
                         * used.  this seems like the best error recovery
                         * we can get ...
                         */
                        goto failed;
                }
#else
                flush_metapage(mp);
#endif

                cp += PSIZE;
                nbytes -= nb;
        }

        ea->flag = DXD_EXTENT;
        DXDsize(ea, le32_to_cpu(ealist->size));
        DXDlength(ea, nblocks);
        DXDaddress(ea, blkno);

        /* Free up INLINE area */
        if (ji->ea.flag & DXD_INLINE)
                ji->mode2 |= INLINEEA;

        return 0;

      failed:
        /* Rollback quota allocation. */
        DQUOT_FREE_BLOCK(ip, nblocks);

        dbFree(ip, blkno, nblocks);
        return rc;
}
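
/*
 * Worked example (illustrative, assuming a 4 KiB aggregate block size,
 * i.e. s_blocksize_bits == 12): a 6000-byte EA list that cannot be
 * in-lined needs nblocks = (6000 + 4095) >> 12 = 2 blocks; the loop above
 * then writes one chunk of at most PSIZE bytes per metapage, rounding each
 * chunk up to a whole number of aggregate blocks.
 */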

/*
 * NAME: ea_read_inline
 *
 * FUNCTION: Read an inlined EA into user's buffer
 *
 * PARAMETERS:
 *      ip      - Inode pointer
 *      ealist  - Pointer to buffer to fill in with EA
 *
 * RETURNS: 0
 */
static int ea_read_inline(struct inode *ip, struct jfs_ea_list *ealist)
{
        struct jfs_inode_info *ji = JFS_IP(ip);
        int ea_size = sizeDXD(&ji->ea);

        if (ea_size == 0) {
                ealist->size = 0;
                return 0;
        }

        /* Sanity Check */
        if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea)))
                return -EIO;
        if (le32_to_cpu(((struct jfs_ea_list *) &ji->i_inline_ea)->size)
            != ea_size)
                return -EIO;

        memcpy(ealist, ji->i_inline_ea, ea_size);
        return 0;
}

/*
 * NAME: ea_read
 *
 * FUNCTION: copy EA data into user's buffer
 *
 * PARAMETERS:
 *      ip      - Inode pointer
 *      ealist  - Pointer to buffer to fill in with EA
 *
 * NOTES:  If EA is inline calls ea_read_inline() to copy EA.
 *
 * RETURNS: 0 for success; other indicates failure
 */
static int ea_read(struct inode *ip, struct jfs_ea_list *ealist)
{
        struct super_block *sb = ip->i_sb;
        struct jfs_inode_info *ji = JFS_IP(ip);
        struct jfs_sb_info *sbi = JFS_SBI(sb);
        int nblocks;
        s64 blkno;
        char *cp = (char *) ealist;
        int i;
        int nbytes, nb;
        s32 bytes_to_read;
        struct metapage *mp;

        /* quick check for in-line EA */
        if (ji->ea.flag & DXD_INLINE)
                return ea_read_inline(ip, ealist);

        nbytes = sizeDXD(&ji->ea);
        if (!nbytes) {
                jfs_error(sb, "ea_read: nbytes is 0");
                return -EIO;
        }

        /*
         * Figure out how many blocks were allocated when this EA list was
         * originally written to disk.
         */
        nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage;
        blkno = addressDXD(&ji->ea) << sbi->l2nbperpage;

        /*
         * I have found the disk blocks which were originally used to store
         * the FEALIST.  now i loop over each contiguous block copying the
         * data into the buffer.
         */
        for (i = 0; i < nblocks; i += sbi->nbperpage) {
                /*
                 * Determine how many bytes for this request, and round up to
                 * the nearest aggregate block size
                 */
                nb = min(PSIZE, nbytes);
                bytes_to_read =
                    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
                    << sb->s_blocksize_bits;

                if (!(mp = read_metapage(ip, blkno + i, bytes_to_read, 1)))
                        return -EIO;

                memcpy(cp, mp->data, nb);
                release_metapage(mp);

                cp += PSIZE;
                nbytes -= nb;
        }

        return 0;
}

/*
 * NAME: ea_get
 *
 * FUNCTION: Returns buffer containing existing extended attributes.
 *      The size of the buffer will be the larger of the existing
 *      attributes size, or min_size.
 *
 *      The buffer, which may be inlined in the inode or in the
 *      page cache, must be released by calling ea_release or ea_put
 *
 * PARAMETERS:
 *      inode   - Inode pointer
 *      ea_buf  - Structure to be populated with ealist and its metadata
 *      min_size- minimum size of buffer to be returned
 *
 * RETURNS: 0 for success; Other indicates failure
 */
static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
{
        struct jfs_inode_info *ji = JFS_IP(inode);
        struct super_block *sb = inode->i_sb;
        int size;
        int ea_size = sizeDXD(&ji->ea);
        int blocks_needed, current_blocks;
        s64 blkno;
        int rc;
        int quota_allocation = 0;

        /* When fsck.jfs clears a bad ea, it doesn't clear the size */
        if (ji->ea.flag == 0)
                ea_size = 0;

        if (ea_size == 0) {
                if (min_size == 0) {
                        ea_buf->flag = 0;
                        ea_buf->max_size = 0;
                        ea_buf->xattr = NULL;
                        return 0;
                }
                if ((min_size <= sizeof (ji->i_inline_ea)) &&
                    (ji->mode2 & INLINEEA)) {
                        ea_buf->flag = EA_INLINE | EA_NEW;
                        ea_buf->max_size = sizeof (ji->i_inline_ea);
                        ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
                        DXDlength(&ea_buf->new_ea, 0);
                        DXDaddress(&ea_buf->new_ea, 0);
                        ea_buf->new_ea.flag = DXD_INLINE;
                        DXDsize(&ea_buf->new_ea, min_size);
                        return 0;
                }
                current_blocks = 0;
        } else if (ji->ea.flag & DXD_INLINE) {
                if (min_size <= sizeof (ji->i_inline_ea)) {
                        ea_buf->flag = EA_INLINE;
                        ea_buf->max_size = sizeof (ji->i_inline_ea);
                        ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
                        goto size_check;
                }
                current_blocks = 0;
        } else {
                if (!(ji->ea.flag & DXD_EXTENT)) {
                        jfs_error(sb, "ea_get: invalid ea.flag)");
                        return -EIO;
                }
                current_blocks = (ea_size + sb->s_blocksize - 1) >>
                    sb->s_blocksize_bits;
        }
        size = max(min_size, ea_size);

        if (size > PSIZE) {
                /*
                 * To keep the rest of the code simple.  Allocate a
                 * contiguous buffer to work with
                 */
                ea_buf->xattr = kmalloc(size, GFP_KERNEL);
                if (ea_buf->xattr == NULL)
                        return -ENOMEM;

                ea_buf->flag = EA_MALLOC;
                ea_buf->max_size = (size + sb->s_blocksize - 1) &
                    ~(sb->s_blocksize - 1);

                if (ea_size == 0)
                        return 0;

                if ((rc = ea_read(inode, ea_buf->xattr))) {
                        kfree(ea_buf->xattr);
                        ea_buf->xattr = NULL;
                        return rc;
                }
                goto size_check;
        }
        blocks_needed = (min_size + sb->s_blocksize - 1) >>
            sb->s_blocksize_bits;

        if (blocks_needed > current_blocks) {
                /* Allocate new blocks to quota. */
                if (DQUOT_ALLOC_BLOCK(inode, blocks_needed))
                        return -EDQUOT;

                quota_allocation = blocks_needed;

                rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
                             &blkno);
                if (rc)
                        goto clean_up;

                DXDlength(&ea_buf->new_ea, blocks_needed);
                DXDaddress(&ea_buf->new_ea, blkno);
                ea_buf->new_ea.flag = DXD_EXTENT;
                DXDsize(&ea_buf->new_ea, min_size);

                ea_buf->flag = EA_EXTENT | EA_NEW;
                ea_buf->mp = get_metapage(inode, blkno,
                                          blocks_needed << sb->s_blocksize_bits,
                                          1);
                if (ea_buf->mp == NULL) {
                        dbFree(inode, blkno, (s64) blocks_needed);
                        rc = -EIO;
                        goto clean_up;
                }
                ea_buf->xattr = ea_buf->mp->data;
                ea_buf->max_size = (min_size + sb->s_blocksize - 1) &
                    ~(sb->s_blocksize - 1);
                if (ea_size == 0)
                        return 0;
                if ((rc = ea_read(inode, ea_buf->xattr))) {
                        discard_metapage(ea_buf->mp);
                        dbFree(inode, blkno, (s64) blocks_needed);
                        goto clean_up;
                }
                goto size_check;
        }
        ea_buf->flag = EA_EXTENT;
        ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea),
                                   lengthDXD(&ji->ea) << sb->s_blocksize_bits,
                                   1);
        if (ea_buf->mp == NULL) {
                rc = -EIO;
                goto clean_up;
        }
        ea_buf->xattr = ea_buf->mp->data;
        ea_buf->max_size = (ea_size + sb->s_blocksize - 1) &
            ~(sb->s_blocksize - 1);

      size_check:
        if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
                printk(KERN_ERR "ea_get: invalid extended attribute\n");
                dump_mem("xattr", ea_buf->xattr, ea_size);
                ea_release(inode, ea_buf);
                rc = -EIO;
                goto clean_up;
        }

        return ea_size;

      clean_up:
        /* Rollback quota allocation */
        if (quota_allocation)
                DQUOT_FREE_BLOCK(inode, quota_allocation);

        return (rc);
}
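
/*
 * Sizing sketch (illustrative, assuming a 4 KiB block size and
 * PSIZE == 4096): an existing 10000-byte extent-based EA list gives
 * current_blocks = 3 and size > PSIZE, so ea_get() copies the list into a
 * kmalloc'd buffer and marks it EA_MALLOC; a short list that already fits
 * in the inode's inline area is returned in place and marked EA_INLINE.
 */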

static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
{
        if (ea_buf->flag & EA_MALLOC)
                kfree(ea_buf->xattr);
        else if (ea_buf->flag & EA_EXTENT) {
                assert(ea_buf->mp);
                release_metapage(ea_buf->mp);

                if (ea_buf->flag & EA_NEW)
                        dbFree(inode, addressDXD(&ea_buf->new_ea),
                               lengthDXD(&ea_buf->new_ea));
        }
}

static int ea_put(struct inode *inode, struct ea_buffer *ea_buf, int new_size)
{
        struct jfs_inode_info *ji = JFS_IP(inode);
        unsigned long old_blocks, new_blocks;
        int rc = 0;
        tid_t tid;

        if (new_size == 0) {
                ea_release(inode, ea_buf);
                ea_buf = NULL;
        } else if (ea_buf->flag & EA_INLINE) {
                assert(new_size <= sizeof (ji->i_inline_ea));
                ji->mode2 &= ~INLINEEA;
                ea_buf->new_ea.flag = DXD_INLINE;
                DXDsize(&ea_buf->new_ea, new_size);
                DXDaddress(&ea_buf->new_ea, 0);
                DXDlength(&ea_buf->new_ea, 0);
        } else if (ea_buf->flag & EA_MALLOC) {
                rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
                kfree(ea_buf->xattr);
        } else if (ea_buf->flag & EA_NEW) {
                /* We have already allocated a new dxd */
                flush_metapage(ea_buf->mp);
        } else {
                /* ->xattr must point to original ea's metapage */
                rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
                discard_metapage(ea_buf->mp);
        }
        if (rc)
                return rc;

        tid = txBegin(inode->i_sb, 0);
        down(&ji->commit_sem);

        old_blocks = new_blocks = 0;

        if (ji->ea.flag & DXD_EXTENT) {
                invalidate_dxd_metapages(inode, ji->ea);
                old_blocks = lengthDXD(&ji->ea);
        }

        if (ea_buf) {
                txEA(tid, inode, &ji->ea, &ea_buf->new_ea);
                if (ea_buf->new_ea.flag & DXD_EXTENT) {
                        new_blocks = lengthDXD(&ea_buf->new_ea);
                        if (ji->ea.flag & DXD_INLINE)
                                ji->mode2 |= INLINEEA;
                }
                ji->ea = ea_buf->new_ea;
        } else {
                txEA(tid, inode, &ji->ea, NULL);
                if (ji->ea.flag & DXD_INLINE)
                        ji->mode2 |= INLINEEA;
                ji->ea.flag = 0;
                ji->ea.size = 0;
        }

        /* If old blocks exist, they must be removed from quota allocation. */
        if (old_blocks)
                DQUOT_FREE_BLOCK(inode, old_blocks);

        inode->i_ctime = CURRENT_TIME;
        rc = txCommit(tid, 1, &inode, 0);
        txEnd(tid);
        up(&ji->commit_sem);

        return rc;
}

/*
 * can_set_system_xattr
 *
 * This code is specific to the system.* namespace.  It contains policy
 * which doesn't belong in the main xattr codepath.
 */
static int can_set_system_xattr(struct inode *inode, const char *name,
                                const void *value, size_t value_len)
{
#ifdef CONFIG_JFS_POSIX_ACL
        struct posix_acl *acl;
        int rc;

        if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
                return -EPERM;

        /*
         * XATTR_NAME_ACL_ACCESS is tied to i_mode
         */
        if (strcmp(name, XATTR_NAME_ACL_ACCESS) == 0) {
                acl = posix_acl_from_xattr(value, value_len);
                if (IS_ERR(acl)) {
                        rc = PTR_ERR(acl);
                        printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
                               rc);
                        return rc;
                }
                if (acl) {
                        mode_t mode = inode->i_mode;
                        rc = posix_acl_equiv_mode(acl, &mode);
                        posix_acl_release(acl);
                        if (rc < 0) {
                                printk(KERN_ERR
                                       "posix_acl_equiv_mode returned %d\n",
                                       rc);
                                return rc;
                        }
                        inode->i_mode = mode;
                        mark_inode_dirty(inode);
                }
                /*
                 * We're changing the ACL.  Get rid of the cached one
                 */
                acl = JFS_IP(inode)->i_acl;
                if (acl != JFS_ACL_NOT_CACHED)
                        posix_acl_release(acl);
                JFS_IP(inode)->i_acl = JFS_ACL_NOT_CACHED;

                return 0;
        } else if (strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0) {
                acl = posix_acl_from_xattr(value, value_len);
                if (IS_ERR(acl)) {
                        rc = PTR_ERR(acl);
                        printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
                               rc);
                        return rc;
                }
                posix_acl_release(acl);

                /*
                 * We're changing the default ACL.  Get rid of the cached one
                 */
                acl = JFS_IP(inode)->i_default_acl;
                if (acl && (acl != JFS_ACL_NOT_CACHED))
                        posix_acl_release(acl);
                JFS_IP(inode)->i_default_acl = JFS_ACL_NOT_CACHED;

                return 0;
        }
#endif                          /* CONFIG_JFS_POSIX_ACL */
        return -EOPNOTSUPP;
}

static int can_set_xattr(struct inode *inode, const char *name,
                         const void *value, size_t value_len)
{
        if (IS_RDONLY(inode))
                return -EROFS;

        if (IS_IMMUTABLE(inode) || IS_APPEND(inode) || S_ISLNK(inode->i_mode))
                return -EPERM;

        if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) == 0)
                /*
                 * "system.*"
                 */
                return can_set_system_xattr(inode, name, value, value_len);

        if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0)
                return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);

#ifdef CONFIG_JFS_SECURITY
        if (strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)
            == 0)
                return 0;       /* Leave it to the security module */
#endif

        if ((strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) != 0) &&
            (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) != 0))
                return -EOPNOTSUPP;

        if (!S_ISREG(inode->i_mode) &&
            (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
                return -EPERM;

        return permission(inode, MAY_WRITE, NULL);
}

int __jfs_setxattr(struct inode *inode, const char *name, const void *value,
                   size_t value_len, int flags)
{
        struct jfs_ea_list *ealist;
        struct jfs_ea *ea, *old_ea = NULL, *next_ea = NULL;
        struct ea_buffer ea_buf;
        int old_ea_size = 0;
        int xattr_size;
        int new_size;
        int namelen = strlen(name);
        char *os2name = NULL;
        int found = 0;
        int rc;
        int length;

        if ((rc = can_set_xattr(inode, name, value, value_len)))
                return rc;

        if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
                os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
                                  GFP_KERNEL);
                if (!os2name)
                        return -ENOMEM;
                strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
                name = os2name;
                namelen -= XATTR_OS2_PREFIX_LEN;
        }

        down_write(&JFS_IP(inode)->xattr_sem);

        xattr_size = ea_get(inode, &ea_buf, 0);
        if (xattr_size < 0) {
                rc = xattr_size;
                goto out;
        }

      again:
        ealist = (struct jfs_ea_list *) ea_buf.xattr;
        new_size = sizeof (struct jfs_ea_list);

        if (xattr_size) {
                for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist);
                     ea = NEXT_EA(ea)) {
                        if ((namelen == ea->namelen) &&
                            (memcmp(name, ea->name, namelen) == 0)) {
                                found = 1;
                                if (flags & XATTR_CREATE) {
                                        rc = -EEXIST;
                                        goto release;
                                }
                                old_ea = ea;
                                old_ea_size = EA_SIZE(ea);
                                next_ea = NEXT_EA(ea);
                        } else
                                new_size += EA_SIZE(ea);
                }
        }

        if (!found) {
                if (flags & XATTR_REPLACE) {
                        rc = -ENODATA;
                        goto release;
                }
                if (value == NULL) {
                        rc = 0;
                        goto release;
                }
        }
        if (value)
                new_size += sizeof (struct jfs_ea) + namelen + 1 + value_len;

        if (new_size > ea_buf.max_size) {
                /*
                 * We need to allocate more space for merged ea list.
                 * We should only have loop to again: once.
                 */
                ea_release(inode, &ea_buf);
                xattr_size = ea_get(inode, &ea_buf, new_size);
                if (xattr_size < 0) {
                        rc = xattr_size;
                        goto out;
                }
                goto again;
        }

        /* Remove old ea of the same name */
        if (found) {
                /* number of bytes following target EA */
                length = (char *) END_EALIST(ealist) - (char *) next_ea;
                if (length > 0)
                        memmove(old_ea, next_ea, length);
                xattr_size -= old_ea_size;
        }

        /* Add new entry to the end */
        if (value) {
                if (xattr_size == 0)
                        /* Completely new ea list */
                        xattr_size = sizeof (struct jfs_ea_list);

                ea = (struct jfs_ea *) ((char *) ealist + xattr_size);
                ea->flag = 0;
                ea->namelen = namelen;
                ea->valuelen = (cpu_to_le16(value_len));
                memcpy(ea->name, name, namelen);
                ea->name[namelen] = 0;
                if (value_len)
                        memcpy(&ea->name[namelen + 1], value, value_len);
                xattr_size += EA_SIZE(ea);
        }

        /* DEBUG - If we did this right, these numbers match */
        if (xattr_size != new_size) {
                printk(KERN_ERR
                       "jfs_xsetattr: xattr_size = %d, new_size = %d\n",
                       xattr_size, new_size);
                rc = -EINVAL;
                goto release;
        }

        /*
         * If we're left with an empty list, there's no ea
         */
        if (new_size == sizeof (struct jfs_ea_list))
                new_size = 0;

        ealist->size = cpu_to_le32(new_size);

        rc = ea_put(inode, &ea_buf, new_size);

        goto out;
      release:
        ea_release(inode, &ea_buf);
      out:
        up_write(&JFS_IP(inode)->xattr_sem);

        kfree(os2name);

        return rc;
}

int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
                 size_t value_len, int flags)
{
        if (value == NULL) {    /* empty EA, do not remove */
                value = "";
                value_len = 0;
        }

        return __jfs_setxattr(dentry->d_inode, name, value, value_len, flags);
}

static int can_get_xattr(struct inode *inode, const char *name)
{
#ifdef CONFIG_JFS_SECURITY
        if (strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)
            == 0)
                return 0;
#endif

        if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0)
                return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);

        if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) == 0)
                return 0;

        return permission(inode, MAY_READ, NULL);
}

ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
                       size_t buf_size)
{
        struct jfs_ea_list *ealist;
        struct jfs_ea *ea;
        struct ea_buffer ea_buf;
        int xattr_size;
        ssize_t size;
        int namelen = strlen(name);
        char *os2name = NULL;
        int rc;
        char *value;

        if ((rc = can_get_xattr(inode, name)))
                return rc;

        if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
                os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
                                  GFP_KERNEL);
                if (!os2name)
                        return -ENOMEM;
                strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
                name = os2name;
                namelen -= XATTR_OS2_PREFIX_LEN;
        }

        down_read(&JFS_IP(inode)->xattr_sem);

        xattr_size = ea_get(inode, &ea_buf, 0);
        if (xattr_size < 0) {
                size = xattr_size;
                goto out;
        }

        if (xattr_size == 0)
                goto not_found;

        ealist = (struct jfs_ea_list *) ea_buf.xattr;

        /* Find the named attribute */
        for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
                if ((namelen == ea->namelen) &&
                    memcmp(name, ea->name, namelen) == 0) {
                        /* Found it */
                        size = le16_to_cpu(ea->valuelen);
                        if (!data)
                                goto release;
                        else if (size > buf_size) {
                                size = -ERANGE;
                                goto release;
                        }
                        value = ((char *) &ea->name) + ea->namelen + 1;
                        memcpy(data, value, size);
                        goto release;
                }
      not_found:
        size = -ENODATA;
      release:
        ea_release(inode, &ea_buf);
      out:
        up_read(&JFS_IP(inode)->xattr_sem);

        kfree(os2name);

        return size;
}

ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data,
                     size_t buf_size)
{
        int err;

        err = __jfs_getxattr(dentry->d_inode, name, data, buf_size);

        return err;
}

/*
 * No special permissions are needed to list attributes except for trusted.*
 */
static inline int can_list(struct jfs_ea *ea)
{
        return (strncmp(ea->name, XATTR_TRUSTED_PREFIX,
                        XATTR_TRUSTED_PREFIX_LEN) ||
                capable(CAP_SYS_ADMIN));
}

ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
{
        struct inode *inode = dentry->d_inode;
        char *buffer;
        ssize_t size = 0;
        int xattr_size;
        struct jfs_ea_list *ealist;
        struct jfs_ea *ea;
        struct ea_buffer ea_buf;

        down_read(&JFS_IP(inode)->xattr_sem);

        xattr_size = ea_get(inode, &ea_buf, 0);
        if (xattr_size < 0) {
                size = xattr_size;
                goto out;
        }

        if (xattr_size == 0)
                goto release;

        ealist = (struct jfs_ea_list *) ea_buf.xattr;

        /* compute required size of list */
        for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
                if (can_list(ea))
                        size += name_size(ea) + 1;
        }

        if (!data)
                goto release;

        if (size > buf_size) {
                size = -ERANGE;
                goto release;
        }

        /* Copy attribute names to buffer */
        buffer = data;
        for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
                if (can_list(ea)) {
                        int namelen = copy_name(buffer, ea);
                        buffer += namelen + 1;
                }
        }

      release:
        ea_release(inode, &ea_buf);
      out:
        up_read(&JFS_IP(inode)->xattr_sem);
        return size;
}

int jfs_removexattr(struct dentry *dentry, const char *name)
{
        return __jfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
}
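
/*
 * Usage sketch (illustrative, not part of the original file): from user
 * space these handlers are reached through the generic xattr system calls.
 * Assuming a JFS file at the hypothetical path /mnt/jfs/file:
 *
 *      #include <sys/xattr.h>
 *
 *      setxattr("/mnt/jfs/file", "user.comment", "hello", 5, 0);
 *      char buf[64];
 *      ssize_t n = getxattr("/mnt/jfs/file", "user.comment", buf, sizeof(buf));
 *      listxattr("/mnt/jfs/file", buf, sizeof(buf));
 *      removexattr("/mnt/jfs/file", "user.comment");
 *
 * which end up in jfs_setxattr(), jfs_getxattr(), jfs_listxattr() and
 * jfs_removexattr() respectively.
 */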