/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "eaops.h"
#include "eattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 * @sdp: the superblock
 * @er: the request
 * @size: the calculated size of the EA record, in bytes
 *
 * Returns: 1 if the EA should be stuffed
 */

static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
			unsigned int *size)
{
	*size = GFS2_EAREQ_SIZE_STUFFED(er);
	if (*size <= sdp->sd_jbsize)
		return 1;

	*size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);

	return 0;
}
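
/*
 * ea_check_size - sanity-check a request: the data may not exceed
 * GFS2_EA_MAX_DATA_LEN, and the resulting EA record (stuffed or
 * unstuffed) must fit within a single journaled block.
 */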

static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
{
	unsigned int size;

	if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
		return -ERANGE;

	ea_calc_size(sdp, er, &size);

	/* This can only happen with 512 byte blocks */
	if (size > sdp->sd_jbsize)
		return -ERANGE;

	return 0;
}

typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev, void *private);

static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
						  bh->b_data + bh->b_size))
			goto fail;
		if (!GFS2_EATYPE_VALID(ea->ea_type))
			goto fail;

		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

fail:
	gfs2_consist_inode(ip);
	return -EIO;
}
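
/*
 * ea_foreach - walk every EA record attached to the inode. The dinode's
 * di_eattr block either holds the records directly, or, when
 * GFS2_DIF_EA_INDIRECT is set, is an indirect block of up to sd_inptrs
 * pointers to EA blocks, each of which is walked in turn.
 */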

static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	__be64 *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
	if (error)
		return error;

	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
out:
	brelse(bh);
	return error;
}

struct ea_find {
	struct gfs2_ea_request *ef_er;
	struct gfs2_ea_location *ef_el;
};
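
/*
 * ea_find_i - ea_foreach callback that matches a record against the
 * request's type and name. On a match it takes a reference on the buffer,
 * fills in the gfs2_ea_location, and returns 1 to stop the walk.
 */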

static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_find *ef = private;
	struct gfs2_ea_request *er = ef->ef_er;

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (ea->ea_type == er->er_type) {
		if (ea->ea_name_len == er->er_name_len &&
		    !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
			struct gfs2_ea_location *el = ef->ef_el;
			get_bh(bh);
			el->el_bh = bh;
			el->el_ea = ea;
			el->el_prev = prev;
			return 1;
		}
	}

	return 0;
}

int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		 struct gfs2_ea_location *el)
{
	struct ea_find ef;
	int error;

	ef.ef_er = er;
	ef.ef_el = el;

	memset(el, 0, sizeof(struct gfs2_ea_location));

	error = ea_foreach(ip, ea_find_i, &ef);
	if (error > 0)
		return 0;

	return error;
}

/**
 * ea_dealloc_unstuffed - free the data blocks of an unstuffed EA
 * @ip: the inode
 * @bh: the buffer containing the EA
 * @ea: the EA header
 * @prev: the previous EA header in the block, if any
 * @private: if not NULL ("leave"), the record is marked unused rather
 *           than being merged into @prev
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG. But watch, this may not always
 * be true.
 *
 * Returns: errno
 */

static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	int *leave = private;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	struct buffer_head *dibh;
	__be64 *dataptrs;
	u64 bn = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
				 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		if (!ip->i_di.di_blocks)
			gfs2_consist_inode(ip);
		ip->i_di.di_blocks--;
		gfs2_set_inode_blocks(&ip->i_inode);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	if (prev && !leave) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);
	return error;
}
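
/*
 * ea_remove_unstuffed - wrapper around ea_dealloc_unstuffed that acquires
 * the quota references and the rindex glock the deallocation needs, and
 * drops them again afterwards.
 */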

static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
			       struct gfs2_ea_header *ea,
			       struct gfs2_ea_header *prev, int leave)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

	gfs2_glock_dq_uninit(&al->al_ri_gh);

out_quota:
	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_alloc_put(ip);
	return error;
}

struct ea_list {
	struct gfs2_ea_request *ei_er;
	unsigned int ei_size;
};
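
/*
 * ea_list_i - ea_foreach callback for listing EA names. When the request
 * carries no buffer (er_data_len == 0) it only accumulates the required
 * size in ei_size; otherwise it appends "prefix.name\0" to er_data,
 * failing with -ERANGE if the buffer would overflow.
 */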

static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size = gfs2_ea_strlen(ea);

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (er->er_data_len) {
		char *prefix = NULL;
		unsigned int l = 0;
		char c = 0;

		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		switch (ea->ea_type) {
		case GFS2_EATYPE_USR:
			prefix = "user.";
			l = 5;
			break;
		case GFS2_EATYPE_SYS:
			prefix = "system.";
			l = 7;
			break;
		case GFS2_EATYPE_SECURITY:
			prefix = "security.";
			l = 9;
			break;
		}

		BUG_ON(l == 0);

		memcpy(er->er_data + ei->ei_size, prefix, l);
		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
		       ea->ea_name_len);
		memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
	}

	ei->ei_size += ea_size;

	return 0;
}

/**
 * gfs2_ea_list - list the names of the extended attributes of an inode
 * @ip: the inode
 * @er: the request structure; er_data may be NULL to query the needed size
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	if (ip->i_di.di_eattr) {
		struct ea_list ei = { .ei_er = er, .ei_size = 0 };

		error = ea_foreach(ip, ea_list_i, &ei);
		if (!error)
			error = ei.ei_size;
	}

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * ea_get_unstuffed - actually copies the unstuffed data into the
 *                    request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @data: The buffer to copy the data into
 *
 * Returns: errno
 */

static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			    char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
	if (!bh)
		return -ENOMEM;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;
}

int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		     char *data)
{
	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		memcpy(data, GFS2_EA2DATA(el->el_ea),
		       GFS2_EA_DATA_LEN(el->el_ea));
		return 0;
	} else
		return ea_get_unstuffed(ip, el->el_ea, data);
}

/**
 * gfs2_ea_get_i - find an EA and copy its value into the request buffer
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (er->er_data_len) {
		if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
			error = -ERANGE;
		else
			error = gfs2_ea_get_copy(ip, &el, er->er_data);
	}
	if (!error)
		error = GFS2_EA_DATA_LEN(el.el_ea);

	brelse(el.el_bh);

	return error;
}

/**
 * gfs2_ea_get - read an extended attribute, taking the inode glock
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len ||
	    er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Returns: errno
 */

static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_ea_header *ea;
	u64 block;

	block = gfs2_alloc_meta(ip);

	*bhp = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

	ea = GFS2_EA_BH2FIRST(*bhp);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;
	ea->ea_num_ptrs = 0;

	ip->i_di.di_blocks++;
	gfs2_set_inode_blocks(&ip->i_inode);

	return 0;
}

/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * Returns: errno
 */

static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
		    struct gfs2_ea_request *er)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	ea->ea_data_len = cpu_to_be32(er->er_data_len);
	ea->ea_name_len = er->er_name_len;
	ea->ea_type = er->er_type;
	ea->__pad = 0;

	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
		ea->ea_num_ptrs = 0;
		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
	} else {
		__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
		const char *data = er->er_data;
		unsigned int data_len = er->er_data_len;
		unsigned int copy;
		unsigned int x;

		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
		for (x = 0; x < ea->ea_num_ptrs; x++) {
			struct buffer_head *bh;
			u64 block;
			int mh_size = sizeof(struct gfs2_meta_header);

			block = gfs2_alloc_meta(ip);

			bh = gfs2_meta_new(ip->i_gl, block);
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

			ip->i_di.di_blocks++;
			gfs2_set_inode_blocks(&ip->i_inode);

			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
							   data_len;
			memcpy(bh->b_data + mh_size, data, copy);
			if (copy < sdp->sd_jbsize)
				memset(bh->b_data + mh_size + copy, 0,
				       sdp->sd_jbsize - copy);

			*dataptr++ = cpu_to_be64(bh->b_blocknr);
			data += copy;
			data_len -= copy;

			brelse(bh);
		}

		gfs2_assert_withdraw(sdp, !data_len);
	}

	return 0;
}
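
/*
 * ea_alloc_skeleton - common scaffolding for any EA write that allocates
 * blocks: lock and check quota, reserve blocks in a resource group, start
 * a transaction sized for the request, run the supplied skeleton_call to
 * do the actual work, then write the dinode back and unwind.
 */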

typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
				   struct gfs2_ea_request *er, void *private);

static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			     unsigned int blks,
			     ea_skeleton_call_t skeleton_call, void *private)
{
	struct gfs2_alloc *al;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
	if (error)
		goto out_gunlock_q;

	al->al_requested = blks;

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
				 blks + al->al_rgd->rd_length +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = skeleton_call(ip, er, private);
	if (error)
		goto out_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		if (er->er_flags & GFS2_ERF_MODE) {
			gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
					     (ip->i_inode.i_mode & S_IFMT) ==
					     (er->er_mode & S_IFMT));
			ip->i_inode.i_mode = er->er_mode;
		}
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

out_end_trans:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}

static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		     void *private)
{
	struct buffer_head *bh;
	int error;

	error = ea_alloc_blk(ip, &bh);
	if (error)
		return error;

	ip->i_di.di_eattr = bh->b_blocknr;
	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

	brelse(bh);

	return error;
}

/**
 * ea_init - initializes a new eattr block
 * @ip: the inode
 * @er: the request
 *
 * Returns: errno
 */

static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
	unsigned int blks = 1;

	if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
}
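
/*
 * ea_split_ea - carve the unused tail of an EA record off into a new
 * record. The original record is shrunk to its actual size, and the new
 * record inherits the GFS2_EAFLAG_LAST flag if the original had it; the
 * caller is expected to fill in the new record (e.g. via ea_write).
 */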

static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
	u32 ea_size = GFS2_EA_SIZE(ea);
	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
								ea_size);
	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
	int last = ea->ea_flags & GFS2_EAFLAG_LAST;

	ea->ea_rec_len = cpu_to_be32(ea_size);
	ea->ea_flags ^= last;

	new->ea_rec_len = cpu_to_be32(new_size);
	new->ea_flags = last;

	return new;
}

static void ea_set_remove_stuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	u32 len;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		return;
	} else if (GFS2_EA2NEXT(prev) != ea) {
		prev = GFS2_EA2NEXT(prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
	}

	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
	prev->ea_rec_len = cpu_to_be32(len);

	if (GFS2_EA_IS_LAST(ea))
		prev->ea_flags |= GFS2_EAFLAG_LAST;
}

struct ea_set {
	int ea_split;

	struct gfs2_ea_request *es_er;
	struct gfs2_ea_location *es_el;
	struct buffer_head *es_bh;
	struct gfs2_ea_header *es_ea;
};

static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
				 struct gfs2_ea_header *ea, struct ea_set *es)
{
	struct gfs2_ea_request *er = es->es_er;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	ea_write(ip, ea, er);

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (er->er_flags & GFS2_ERF_MODE) {
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
		ip->i_inode.i_mode = er->er_mode;
	}
	ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
out:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}

static int ea_set_simple_alloc(struct gfs2_inode *ip,
			       struct gfs2_ea_request *er, void *private)
{
	struct ea_set *es = private;
	struct gfs2_ea_header *ea = es->es_ea;
	int error;

	gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	error = ea_write(ip, ea, er);
	if (error)
		return error;

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	return 0;
}
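
/*
 * ea_set_simple - ea_foreach callback that looks for room for the new EA
 * in the existing EA blocks: either a large-enough unused record, or the
 * slack at the end of a live record (in which case the record is split).
 * Returns 1 once the EA has been written, 0 to keep searching.
 */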

static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		if (!GFS2_EA_IS_STUFFED(ea)) {
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		unsigned int blks;

		es->es_bh = bh;
		es->es_ea = ea;
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					GFS2_SB(&ip->i_inode)->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}
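
/*
 * ea_set_block - write the EA into a freshly allocated block. If the inode
 * is not yet using an indirect EA block, one is allocated and the current
 * di_eattr block becomes its first pointer; otherwise the new block is
 * hooked into the first free slot of the existing indirect block.
 */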

static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *indbh, *newbh;
	__be64 *eablk;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		__be64 *end;

		error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
				       &indbh);
		if (error)
			return error;

		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
			error = -EIO;
			goto out;
		}

		eablk = (__be64 *)(indbh->b_data + mh_size);
		end = eablk + sdp->sd_inptrs;

		for (; eablk < end; eablk++)
			if (!*eablk)
				break;

		if (eablk == end) {
			error = -ENOSPC;
			goto out;
		}

		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
	} else {
		u64 blk;

		blk = gfs2_alloc_meta(ip);

		indbh = gfs2_meta_new(ip->i_gl, blk);
		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(indbh, mh_size);

		eablk = (__be64 *)(indbh->b_data + mh_size);
		*eablk = cpu_to_be64(ip->i_di.di_eattr);
		ip->i_di.di_eattr = blk;
		ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
		ip->i_di.di_blocks++;
		gfs2_set_inode_blocks(&ip->i_inode);

		eablk++;
	}

	error = ea_alloc_blk(ip, &newbh);
	if (error)
		goto out;

	*eablk = cpu_to_be64((u64)newbh->b_blocknr);
	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
	brelse(newbh);
	if (error)
		goto out;

	if (private)
		ea_set_remove_stuffed(ip, private);

out:
	brelse(indbh);
	return error;
}

static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		    struct gfs2_ea_location *el)
{
	struct ea_set es;
	unsigned int blks = 2;
	int error;

	memset(&es, 0, sizeof(struct ea_set));
	es.es_er = er;
	es.es_el = el;

	error = ea_foreach(ip, ea_set_simple, &es);
	if (error > 0)
		return 0;
	if (error)
		return error;

	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
		blks++;
	if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
		blks += DIV_ROUND_UP(er->er_data_len,
				     GFS2_SB(&ip->i_inode)->sd_jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
}

static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
				   struct gfs2_ea_location *el)
{
	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
		el->el_prev = GFS2_EA2NEXT(el->el_prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
	}

	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}
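
/*
 * gfs2_ea_set_i - create or replace an EA according to the
 * XATTR_CREATE/XATTR_REPLACE flags: XATTR_CREATE fails with -EEXIST if
 * the attribute already exists, XATTR_REPLACE fails with -ENODATA if it
 * does not. When an existing unstuffed EA is replaced, its old data
 * blocks are freed afterwards.
 */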

int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr) {
		if (er->er_flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, er);
	}

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;

	if (el.el_ea) {
		if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		error = -EEXIST;
		if (!(er->er_flags & XATTR_CREATE)) {
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, er, &el);
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
	} else {
		error = -ENODATA;
		if (!(er->er_flags & XATTR_REPLACE))
			error = ea_set_i(ip, er, NULL);
	}

	return error;
}

int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}
	error = ea_check_size(GFS2_SB(&ip->i_inode), er);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(&ip->i_inode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (prev) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else
		ea->ea_type = GFS2_EATYPE_UNUSED;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}

int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (GFS2_EA_IS_STUFFED(el.el_ea))
		error = ea_remove_stuffed(ip, &el);
	else
		error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
					    0);

	brelse(el.el_bh);

	return error;
}

/**
 * gfs2_ea_remove - removes an extended attribute
 * @ip: pointer to the inode of the target file
 * @er: request information
 *
 * Returns: errno
 */

int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
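
/*
 * ea_acl_chmod_unstuffed - rewrite the data blocks of an unstuffed ACL EA
 * in place. The blocks are read and updated inside a single transaction,
 * one journaled-block-sized chunk at a time; on success the transaction
 * is ended by the caller, gfs2_ea_acl_chmod().
 */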

static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_header *ea, char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
	if (!bh)
		return -ENOMEM;

	error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
	if (error)
		goto out;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto fail;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto fail;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto fail;
		}

		gfs2_trans_add_bh(ip->i_gl, bh[x], 1);

		memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;

fail:
	gfs2_trans_end(sdp);
	kfree(bh);
	return error;
}

int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		      struct iattr *attr, char *data)
{
	struct buffer_head *dibh;
	int error;

	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
		if (error)
			return error;

		gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
		memcpy(GFS2_EA2DATA(el->el_ea), data,
		       GFS2_EA_DATA_LEN(el->el_ea));
	} else
		error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);

	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}
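
/*
 * ea_dealloc_indirect - free every EA block referenced by the indirect
 * block. Two passes are made over the pointer list: the first builds the
 * list of resource groups that must be locked, the second frees the
 * blocks (coalescing contiguous runs) once the rgrp glocks and a
 * transaction are held. The indirect block itself is freed afterwards by
 * ea_dealloc_block().
 */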

static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	struct buffer_head *indbh, *dibh;
	__be64 *eablk, *end;
	unsigned int rg_blocks = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &indbh);
	if (error)
		return error;

	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + sdp->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(sdp, &rlist, bstart);
			bstart = bn;
			blen = 1;
		}
		blks++;
	}
	if (bstart)
		gfs2_rlist_add(sdp, &rlist, bstart);
	else
		goto out;

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist_free;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
				 RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, indbh, 1);

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	bstart = 0;
	blen = 0;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*eablk = 0;
		if (!ip->i_di.di_blocks)
			gfs2_consist_inode(ip);
		ip->i_di.di_blocks--;
		gfs2_set_inode_blocks(&ip->i_inode);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
	gfs2_rlist_free(&rlist);
out:
	brelse(indbh);
	return error;
}

static int ea_dealloc_block(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd;
	struct buffer_head *dibh;
	int error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
				 RES_QUOTA, 1);
	if (error)
		goto out_gunlock;

	gfs2_free_meta(ip, ip->i_di.di_eattr, 1);

	ip->i_di.di_eattr = 0;
	if (!ip->i_di.di_blocks)
		gfs2_consist_inode(ip);
	ip->i_di.di_blocks--;
	gfs2_set_inode_blocks(&ip->i_inode);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
	return error;
}

/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */

int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
	if (error)
		goto out_rindex;

	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		error = ea_dealloc_indirect(ip);
		if (error)
			goto out_rindex;
	}

	error = ea_dealloc_block(ip);

out_rindex:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_quota:
	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_alloc_put(ip);
	return error;
}