eattr.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "eaops.h"
#include "eattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 * @sdp: the filesystem superblock
 * @er: the EA request
 * @size: used to return the calculated size
 *
 * Returns: 1 if the EA should be stuffed
 */

static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
                        unsigned int *size)
{
        *size = GFS2_EAREQ_SIZE_STUFFED(er);
        if (*size <= sdp->sd_jbsize)
                return 1;

        *size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);

        return 0;
}

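/*
 * ea_check_size - reject requests that can never fit on disk. Even the
 * unstuffed form (header, name and data-block pointers) must fit within a
 * single journaled block, so a size above sd_jbsize is -ERANGE.
 */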
static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
{
        unsigned int size;

        if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
                return -ERANGE;

        ea_calc_size(sdp, er, &size);

        /* This can only happen with 512 byte blocks */
        if (size > sdp->sd_jbsize)
                return -ERANGE;

        return 0;
}

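/*
 * Iteration helpers: ea_foreach_i() walks every gfs2_ea_header in one EA
 * block, validating record lengths, bounds and types as it goes, and calls
 * the given ea_call_t on each record; a non-zero return from the callback
 * stops the walk and is passed back to the caller.
 */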
typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
                          struct gfs2_ea_header *ea,
                          struct gfs2_ea_header *prev, void *private);

static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
                        ea_call_t ea_call, void *data)
{
        struct gfs2_ea_header *ea, *prev = NULL;
        int error = 0;

        if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
                return -EIO;

        for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
                if (!GFS2_EA_REC_LEN(ea))
                        goto fail;
                if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
                                                  bh->b_data + bh->b_size))
                        goto fail;
                if (!GFS2_EATYPE_VALID(ea->ea_type))
                        goto fail;

                error = ea_call(ip, bh, ea, prev, data);
                if (error)
                        return error;

                if (GFS2_EA_IS_LAST(ea)) {
                        if ((char *)GFS2_EA2NEXT(ea) !=
                            bh->b_data + bh->b_size)
                                goto fail;
                        break;
                }
        }

        return error;

fail:
        gfs2_consist_inode(ip);
        return -EIO;
}

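/*
 * ea_foreach() hides the on-disk layout from the callback: ip->i_eattr
 * points either directly at a single EA block or, when the
 * GFS2_DIF_EA_INDIRECT flag is set, at an indirect block holding a table
 * of up to sd_inptrs pointers to EA blocks, each of which is walked in
 * turn.
 */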
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
        struct buffer_head *bh, *eabh;
        __be64 *eablk, *end;
        int error;

        error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &bh);
        if (error)
                return error;

        if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
                error = ea_foreach_i(ip, bh, ea_call, data);
                goto out;
        }

        if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
                error = -EIO;
                goto out;
        }

        eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
                if (error)
                        break;
                error = ea_foreach_i(ip, eabh, ea_call, data);
                brelse(eabh);
                if (error)
                        break;
        }
out:
        brelse(bh);
        return error;
}

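/*
 * Lookup: ea_find_i() is the ea_call_t used by gfs2_ea_find(). It matches
 * on type, name length and name bytes, fills in the gfs2_ea_location
 * (taking a reference on the buffer) and returns 1 to stop the walk; the
 * caller is responsible for the brelse() of el->el_bh.
 */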
struct ea_find {
        struct gfs2_ea_request *ef_er;
        struct gfs2_ea_location *ef_el;
};

static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
                     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                     void *private)
{
        struct ea_find *ef = private;
        struct gfs2_ea_request *er = ef->ef_er;

        if (ea->ea_type == GFS2_EATYPE_UNUSED)
                return 0;

        if (ea->ea_type == er->er_type) {
                if (ea->ea_name_len == er->er_name_len &&
                    !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
                        struct gfs2_ea_location *el = ef->ef_el;
                        get_bh(bh);
                        el->el_bh = bh;
                        el->el_ea = ea;
                        el->el_prev = prev;
                        return 1;
                }
        }

        return 0;
}

int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                 struct gfs2_ea_location *el)
{
        struct ea_find ef;
        int error;

        ef.ef_er = er;
        ef.ef_el = el;

        memset(el, 0, sizeof(struct gfs2_ea_location));

        error = ea_foreach(ip, ea_find_i, &ef);
        if (error > 0)
                return 0;

        return error;
}

/**
 * ea_dealloc_unstuffed - free the data blocks of an unstuffed EA
 * @ip: the inode
 * @bh: the buffer holding the EA block
 * @ea: the EA record whose data blocks are to be freed
 * @prev: the previous EA record in the block, if any
 * @private: if non-NULL, keep the record (marked unused) rather than
 *           merging it into @prev
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG. But watch, this may not always
 * be true.
 *
 * Returns: errno
 */

static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
                                struct gfs2_ea_header *ea,
                                struct gfs2_ea_header *prev, void *private)
{
        int *leave = private;
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrpd *rgd;
        struct gfs2_holder rg_gh;
        struct buffer_head *dibh;
        __be64 *dataptrs;
        u64 bn = 0;
        u64 bstart = 0;
        unsigned int blen = 0;
        unsigned int blks = 0;
        unsigned int x;
        int error;

        if (GFS2_EA_IS_STUFFED(ea))
                return 0;

        dataptrs = GFS2_EA2DATAPTRS(ea);
        for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
                if (*dataptrs) {
                        blks++;
                        bn = be64_to_cpu(*dataptrs);
                }
        }
        if (!blks)
                return 0;

        rgd = gfs2_blk2rgrpd(sdp, bn);
        if (!rgd) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
                                 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
        if (error)
                goto out_gunlock;

        gfs2_trans_add_bh(ip->i_gl, bh, 1);

        dataptrs = GFS2_EA2DATAPTRS(ea);
        for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
                if (!*dataptrs)
                        break;
                bn = be64_to_cpu(*dataptrs);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_free_meta(ip, bstart, blen);
                        bstart = bn;
                        blen = 1;
                }

                *dataptrs = 0;
                gfs2_add_inode_blocks(&ip->i_inode, -1);
        }
        if (bstart)
                gfs2_free_meta(ip, bstart, blen);

        if (prev && !leave) {
                u32 len;

                len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
                prev->ea_rec_len = cpu_to_be32(len);

                if (GFS2_EA_IS_LAST(ea))
                        prev->ea_flags |= GFS2_EAFLAG_LAST;
        } else {
                ea->ea_type = GFS2_EATYPE_UNUSED;
                ea->ea_num_ptrs = 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                ip->i_inode.i_ctime = CURRENT_TIME;
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_uninit(&rg_gh);
        return error;
}

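/*
 * ea_remove_unstuffed() wraps ea_dealloc_unstuffed() in the resources it
 * needs: quota holds and the rindex glock. Passing a non-zero "leave"
 * keeps the record slot (marked GFS2_EATYPE_UNUSED) instead of merging it
 * into the previous record.
 */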
static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
                               struct gfs2_ea_header *ea,
                               struct gfs2_ea_header *prev, int leave)
{
        struct gfs2_alloc *al;
        int error;

        al = gfs2_alloc_get(ip);
        if (!al)
                return -ENOMEM;

        error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out_alloc;

        error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
        if (error)
                goto out_quota;

        error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

        gfs2_glock_dq_uninit(&al->al_ri_gh);

out_quota:
        gfs2_quota_unhold(ip);
out_alloc:
        gfs2_alloc_put(ip);
        return error;
}

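/*
 * Listing: ea_list_i() appends "user."/"system."/"security."-prefixed,
 * NUL-terminated attribute names to the request buffer, or (when
 * er_data_len is zero) merely accumulates the size needed, which is how
 * listxattr(2) probes for the required buffer length.
 */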
struct ea_list {
        struct gfs2_ea_request *ei_er;
        unsigned int ei_size;
};

static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
                     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                     void *private)
{
        struct ea_list *ei = private;
        struct gfs2_ea_request *er = ei->ei_er;
        unsigned int ea_size = gfs2_ea_strlen(ea);

        if (ea->ea_type == GFS2_EATYPE_UNUSED)
                return 0;

        if (er->er_data_len) {
                char *prefix = NULL;
                unsigned int l = 0;
                char c = 0;

                if (ei->ei_size + ea_size > er->er_data_len)
                        return -ERANGE;

                switch (ea->ea_type) {
                case GFS2_EATYPE_USR:
                        prefix = "user.";
                        l = 5;
                        break;
                case GFS2_EATYPE_SYS:
                        prefix = "system.";
                        l = 7;
                        break;
                case GFS2_EATYPE_SECURITY:
                        prefix = "security.";
                        l = 9;
                        break;
                }

                BUG_ON(l == 0);

                memcpy(er->er_data + ei->ei_size, prefix, l);
                memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
                       ea->ea_name_len);
                memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
        }

        ei->ei_size += ea_size;

        return 0;
}

/**
 * gfs2_ea_list - list the names of the inode's extended attributes
 * @ip: the inode
 * @er: the request; if er_data_len is zero, only the needed size is returned
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_data || !er->er_data_len) {
                er->er_data = NULL;
                er->er_data_len = 0;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return error;

        if (ip->i_eattr) {
                struct ea_list ei = { .ei_er = er, .ei_size = 0 };

                error = ea_foreach(ip, ea_list_i, &ei);
                if (!error)
                        error = ei.ei_size;
        }

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

/**
 * ea_get_unstuffed - actually copies the unstuffed data into the
 *                    request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @data: The buffer to fill with the attribute's data
 *
 * Returns: errno
 */

static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                            char *data)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head **bh;
        unsigned int amount = GFS2_EA_DATA_LEN(ea);
        unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
        __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
        unsigned int x;
        int error = 0;

        bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
        if (!bh)
                return -ENOMEM;

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
                                       bh + x);
                if (error) {
                        while (x--)
                                brelse(bh[x]);
                        goto out;
                }
                dataptrs++;
        }

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_wait(sdp, bh[x]);
                if (error) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        goto out;
                }
                if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        error = -EIO;
                        goto out;
                }

                memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
                       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

                amount -= sdp->sd_jbsize;
                data += sdp->sd_jbsize;

                brelse(bh[x]);
        }

out:
        kfree(bh);
        return error;
}

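/*
 * gfs2_ea_get_copy() copies the value of an already-located EA into
 * "data": stuffed values live in the EA block right after the name and
 * can be memcpy()d directly; unstuffed values are reassembled block by
 * block by ea_get_unstuffed() above.
 */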
int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
                     char *data)
{
        if (GFS2_EA_IS_STUFFED(el->el_ea)) {
                memcpy(data, GFS2_EA2DATA(el->el_ea), GFS2_EA_DATA_LEN(el->el_ea));
                return 0;
        } else
                return ea_get_unstuffed(ip, el->el_ea, data);
}

/**
 * gfs2_ea_get_i - read an extended attribute into the request buffer
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_eattr)
                return -ENODATA;

        error = gfs2_ea_find(ip, er, &el);
        if (error)
                return error;
        if (!el.el_ea)
                return -ENODATA;

        if (er->er_data_len) {
                if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
                        error = -ERANGE;
                else
                        error = gfs2_ea_get_copy(ip, &el, er->er_data);
        }
        if (!error)
                error = GFS2_EA_DATA_LEN(el.el_ea);

        brelse(el.el_bh);

        return error;
}

/**
 * gfs2_ea_get - read an extended attribute, taking the inode glock
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */

int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_name_len ||
            er->er_name_len > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;
        if (!er->er_data || !er->er_data_len) {
                er->er_data = NULL;
                er->er_data_len = 0;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return error;

        error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Returns: errno
 */

static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_ea_header *ea;
        unsigned int n = 1;
        u64 block;

        block = gfs2_alloc_block(ip, &n);
        gfs2_trans_add_unrevoke(sdp, block, 1);
        *bhp = gfs2_meta_new(ip->i_gl, block);
        gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
        gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
        gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

        ea = GFS2_EA_BH2FIRST(*bhp);
        ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
        ea->ea_type = GFS2_EATYPE_UNUSED;
        ea->ea_flags = GFS2_EAFLAG_LAST;
        ea->ea_num_ptrs = 0;

        gfs2_add_inode_blocks(&ip->i_inode, 1);

        return 0;
}

/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * Returns: errno
 */

static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                    struct gfs2_ea_request *er)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

        ea->ea_data_len = cpu_to_be32(er->er_data_len);
        ea->ea_name_len = er->er_name_len;
        ea->ea_type = er->er_type;
        ea->__pad = 0;

        memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

        if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
                ea->ea_num_ptrs = 0;
                memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
        } else {
                __be64 *dataptr = GFS2_EA2DATAPTRS(ea);
                const char *data = er->er_data;
                unsigned int data_len = er->er_data_len;
                unsigned int copy;
                unsigned int x;

                ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
                for (x = 0; x < ea->ea_num_ptrs; x++) {
                        struct buffer_head *bh;
                        u64 block;
                        int mh_size = sizeof(struct gfs2_meta_header);
                        unsigned int n = 1;

                        block = gfs2_alloc_block(ip, &n);
                        gfs2_trans_add_unrevoke(sdp, block, 1);
                        bh = gfs2_meta_new(ip->i_gl, block);
                        gfs2_trans_add_bh(ip->i_gl, bh, 1);
                        gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

                        gfs2_add_inode_blocks(&ip->i_inode, 1);

                        copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
                                                           data_len;
                        memcpy(bh->b_data + mh_size, data, copy);
                        if (copy < sdp->sd_jbsize)
                                memset(bh->b_data + mh_size + copy, 0,
                                       sdp->sd_jbsize - copy);

                        *dataptr++ = cpu_to_be64(bh->b_blocknr);
                        data += copy;
                        data_len -= copy;

                        brelse(bh);
                }

                gfs2_assert_withdraw(sdp, !data_len);
        }

        return 0;
}

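/*
 * ea_alloc_skeleton() brackets a block-allocating EA operation with the
 * resources it needs: quota locks, an in-place reservation of "blks"
 * blocks, and a transaction sized for those blocks plus the resource
 * group, dinode, statfs and quota changes. The real work happens in the
 * skeleton_call().
 */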
typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
                                   struct gfs2_ea_request *er, void *private);

static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                             unsigned int blks,
                             ea_skeleton_call_t skeleton_call, void *private)
{
        struct gfs2_alloc *al;
        struct buffer_head *dibh;
        int error;

        al = gfs2_alloc_get(ip);
        if (!al)
                return -ENOMEM;

        error = gfs2_quota_lock_check(ip);
        if (error)
                goto out;

        al->al_requested = blks;

        error = gfs2_inplace_reserve(ip);
        if (error)
                goto out_gunlock_q;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
                                 blks + al->al_rgd->rd_length +
                                 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
        if (error)
                goto out_ipres;

        error = skeleton_call(ip, er, private);
        if (error)
                goto out_end_trans;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                if (er->er_flags & GFS2_ERF_MODE) {
                        gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
                                             (ip->i_inode.i_mode & S_IFMT) ==
                                             (er->er_mode & S_IFMT));
                        ip->i_inode.i_mode = er->er_mode;
                }
                ip->i_inode.i_ctime = CURRENT_TIME;
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

out_end_trans:
        gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
        gfs2_inplace_release(ip);
out_gunlock_q:
        gfs2_quota_unlock(ip);
out:
        gfs2_alloc_put(ip);
        return error;
}

static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                     void *private)
{
        struct buffer_head *bh;
        int error;

        error = ea_alloc_blk(ip, &bh);
        if (error)
                return error;

        ip->i_eattr = bh->b_blocknr;
        error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

        brelse(bh);

        return error;
}

/**
 * ea_init - initializes a new eattr block
 * @ip: the inode
 * @er: the request to store in the new block
 *
 * Returns: errno
 */

static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
        unsigned int blks = 1;

        if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
                blks += DIV_ROUND_UP(er->er_data_len, jbsize);

        return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
}

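/*
 * ea_split_ea() shrinks an existing record to its used size and returns a
 * pointer to a fresh record occupying the freed tail; the GFS2_EAFLAG_LAST
 * bit migrates to the new record since it now ends the block. The caller
 * is expected to fill in the new header via ea_write().
 */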
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
        u32 ea_size = GFS2_EA_SIZE(ea);
        struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
                                                               ea_size);
        u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
        int last = ea->ea_flags & GFS2_EAFLAG_LAST;

        ea->ea_rec_len = cpu_to_be32(ea_size);
        ea->ea_flags ^= last;

        new->ea_rec_len = cpu_to_be32(new_size);
        new->ea_flags = last;

        return new;
}

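/*
 * ea_set_remove_stuffed() deletes the old stuffed copy of an attribute
 * once the replacement has been written elsewhere: either by marking the
 * record unused or, when a true predecessor exists, by folding its space
 * into prev's ea_rec_len. The GFS2_EA2NEXT(prev) fixup handles the case
 * where a split inserted a record between prev and ea.
 */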
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
                                  struct gfs2_ea_location *el)
{
        struct gfs2_ea_header *ea = el->el_ea;
        struct gfs2_ea_header *prev = el->el_prev;
        u32 len;

        gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

        if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
                ea->ea_type = GFS2_EATYPE_UNUSED;
                return;
        } else if (GFS2_EA2NEXT(prev) != ea) {
                prev = GFS2_EA2NEXT(prev);
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
        }

        len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
        prev->ea_rec_len = cpu_to_be32(len);

        if (GFS2_EA_IS_LAST(ea))
                prev->ea_flags |= GFS2_EAFLAG_LAST;
}

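/*
 * struct ea_set carries the state of a set/replace operation through the
 * ea_foreach() walk: the request, the location of any existing copy to
 * remove afterwards, and (for the allocating path) the block and record
 * chosen to receive the new attribute.
 */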
struct ea_set {
        int ea_split;

        struct gfs2_ea_request *es_er;
        struct gfs2_ea_location *es_el;

        struct buffer_head *es_bh;
        struct gfs2_ea_header *es_ea;
};

static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
                                 struct gfs2_ea_header *ea, struct ea_set *es)
{
        struct gfs2_ea_request *er = es->es_er;
        struct buffer_head *dibh;
        int error;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
        if (error)
                return error;

        gfs2_trans_add_bh(ip->i_gl, bh, 1);

        if (es->ea_split)
                ea = ea_split_ea(ea);

        ea_write(ip, ea, er);

        if (es->es_el)
                ea_set_remove_stuffed(ip, es->es_el);

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto out;

        if (er->er_flags & GFS2_ERF_MODE) {
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
                                     (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
                ip->i_inode.i_mode = er->er_mode;
        }
        ip->i_inode.i_ctime = CURRENT_TIME;
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
out:
        gfs2_trans_end(GFS2_SB(&ip->i_inode));
        return error;
}

static int ea_set_simple_alloc(struct gfs2_inode *ip,
                               struct gfs2_ea_request *er, void *private)
{
        struct ea_set *es = private;
        struct gfs2_ea_header *ea = es->es_ea;
        int error;

        gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);

        if (es->ea_split)
                ea = ea_split_ea(ea);

        error = ea_write(ip, ea, er);
        if (error)
                return error;

        if (es->es_el)
                ea_set_remove_stuffed(ip, es->es_el);

        return 0;
}

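/*
 * ea_set_simple() is an ea_call_t: it returns 1 once it has found (and
 * used) space for the new attribute in an existing record, either a big
 * enough GFS2_EATYPE_UNUSED record or the free tail of a live record
 * (es->ea_split), 0 to keep scanning, or -errno on failure. A return of 0
 * from every record makes ea_set_i() fall back to ea_set_block().
 */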
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
                         struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                         void *private)
{
        struct ea_set *es = private;
        unsigned int size;
        int stuffed;
        int error;

        stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er, &size);

        if (ea->ea_type == GFS2_EATYPE_UNUSED) {
                if (GFS2_EA_REC_LEN(ea) < size)
                        return 0;
                if (!GFS2_EA_IS_STUFFED(ea)) {
                        error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
                        if (error)
                                return error;
                }
                es->ea_split = 0;
        } else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
                es->ea_split = 1;
        else
                return 0;

        if (stuffed) {
                error = ea_set_simple_noalloc(ip, bh, ea, es);
                if (error)
                        return error;
        } else {
                unsigned int blks;

                es->es_bh = bh;
                es->es_ea = ea;
                blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
                                        GFS2_SB(&ip->i_inode)->sd_jbsize);

                error = ea_alloc_skeleton(ip, es->es_er, blks,
                                          ea_set_simple_alloc, es);
                if (error)
                        return error;
        }

        return 1;
}

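/*
 * ea_set_block() adds a whole new EA block for the request. The first
 * time the inode outgrows a single block, the existing block is demoted
 * to the first slot of a freshly allocated indirect block and
 * GFS2_DIF_EA_INDIRECT is set; thereafter new blocks just take the next
 * free slot, and -ENOSPC is returned once all sd_inptrs slots are used.
 */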
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                        void *private)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *indbh, *newbh;
        __be64 *eablk;
        int error;
        int mh_size = sizeof(struct gfs2_meta_header);

        if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
                __be64 *end;

                error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT,
                                       &indbh);
                if (error)
                        return error;

                if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
                        error = -EIO;
                        goto out;
                }

                eablk = (__be64 *)(indbh->b_data + mh_size);
                end = eablk + sdp->sd_inptrs;

                for (; eablk < end; eablk++)
                        if (!*eablk)
                                break;

                if (eablk == end) {
                        error = -ENOSPC;
                        goto out;
                }

                gfs2_trans_add_bh(ip->i_gl, indbh, 1);
        } else {
                u64 blk;
                unsigned int n = 1;

                blk = gfs2_alloc_block(ip, &n);
                gfs2_trans_add_unrevoke(sdp, blk, 1);
                indbh = gfs2_meta_new(ip->i_gl, blk);
                gfs2_trans_add_bh(ip->i_gl, indbh, 1);
                gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
                gfs2_buffer_clear_tail(indbh, mh_size);

                eablk = (__be64 *)(indbh->b_data + mh_size);
                *eablk = cpu_to_be64(ip->i_eattr);
                ip->i_eattr = blk;
                ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
                gfs2_add_inode_blocks(&ip->i_inode, 1);

                eablk++;
        }

        error = ea_alloc_blk(ip, &newbh);
        if (error)
                goto out;

        *eablk = cpu_to_be64((u64)newbh->b_blocknr);
        error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
        brelse(newbh);
        if (error)
                goto out;

        if (private)
                ea_set_remove_stuffed(ip, private);

out:
        brelse(indbh);
        return error;
}

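/*
 * ea_set_i() first tries to slot the attribute into the existing blocks
 * via the ea_set_simple() walk; only if that finds no room does it
 * reserve enough blocks (new EA block, data blocks, plus possibly the
 * indirect block) and go through ea_set_block().
 */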
static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                    struct gfs2_ea_location *el)
{
        struct ea_set es;
        unsigned int blks = 2;
        int error;

        memset(&es, 0, sizeof(struct ea_set));
        es.es_er = er;
        es.es_el = el;

        error = ea_foreach(ip, ea_set_simple, &es);
        if (error > 0)
                return 0;
        if (error)
                return error;

        if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
                blks++;
        if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
                blks += DIV_ROUND_UP(er->er_data_len,
                                     GFS2_SB(&ip->i_inode)->sd_jbsize);

        return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
}

static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
                                   struct gfs2_ea_location *el)
{
        if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
                el->el_prev = GFS2_EA2NEXT(el->el_prev);
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
                                     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
        }

        return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}

int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_eattr) {
                if (er->er_flags & XATTR_REPLACE)
                        return -ENODATA;
                return ea_init(ip, er);
        }

        error = gfs2_ea_find(ip, er, &el);
        if (error)
                return error;

        if (el.el_ea) {
                if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
                        brelse(el.el_bh);
                        return -EPERM;
                }

                error = -EEXIST;
                if (!(er->er_flags & XATTR_CREATE)) {
                        int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
                        error = ea_set_i(ip, er, &el);
                        if (!error && unstuffed)
                                ea_set_remove_unstuffed(ip, &el);
                }

                brelse(el.el_bh);
        } else {
                error = -ENODATA;
                if (!(er->er_flags & XATTR_REPLACE))
                        error = ea_set_i(ip, er, NULL);
        }

        return error;
}

int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;
        if (!er->er_data || !er->er_data_len) {
                er->er_data = NULL;
                er->er_data_len = 0;
        }
        error = ea_check_size(GFS2_SB(&ip->i_inode), er);
        if (error)
                return error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                return error;

        if (IS_IMMUTABLE(&ip->i_inode))
                error = -EPERM;
        else
                error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

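/*
 * ea_remove_stuffed() removes an attribute whose value is stored in-block:
 * a short transaction that merges the record into its predecessor (or
 * marks it unused when it heads the block) and bumps the inode ctime.
 */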
static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
        struct gfs2_ea_header *ea = el->el_ea;
        struct gfs2_ea_header *prev = el->el_prev;
        struct buffer_head *dibh;
        int error;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
        if (error)
                return error;

        gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

        if (prev) {
                u32 len;

                len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
                prev->ea_rec_len = cpu_to_be32(len);

                if (GFS2_EA_IS_LAST(ea))
                        prev->ea_flags |= GFS2_EAFLAG_LAST;
        } else
                ea->ea_type = GFS2_EATYPE_UNUSED;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                ip->i_inode.i_ctime = CURRENT_TIME;
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(GFS2_SB(&ip->i_inode));

        return error;
}

int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_eattr)
                return -ENODATA;

        error = gfs2_ea_find(ip, er, &el);
        if (error)
                return error;
        if (!el.el_ea)
                return -ENODATA;

        if (GFS2_EA_IS_STUFFED(el.el_ea))
                error = ea_remove_stuffed(ip, &el);
        else
                error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
                                            0);

        brelse(el.el_bh);

        return error;
}

/**
 * gfs2_ea_remove - removes an extended attribute
 * @ip: pointer to the inode of the target file
 * @er: request information
 *
 * Returns: errno
 */

int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
        struct gfs2_holder i_gh;
        int error;

        if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                return error;

        if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
                error = -EPERM;
        else
                error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

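/*
 * ea_acl_chmod_unstuffed() rewrites an unstuffed ACL value in place. The
 * structure mirrors ea_get_unstuffed() above, except that it runs inside
 * a transaction and copies in the opposite direction, journaling each
 * data block before overwriting it.
 */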
static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
                                  struct gfs2_ea_header *ea, char *data)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head **bh;
        unsigned int amount = GFS2_EA_DATA_LEN(ea);
        unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
        __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
        unsigned int x;
        int error;

        bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
        if (!bh)
                return -ENOMEM;

        error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
        if (error)
                goto out;

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
                                       bh + x);
                if (error) {
                        while (x--)
                                brelse(bh[x]);
                        goto fail;
                }
                dataptrs++;
        }

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_wait(sdp, bh[x]);
                if (error) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        goto fail;
                }
                if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        error = -EIO;
                        goto fail;
                }

                gfs2_trans_add_bh(ip->i_gl, bh[x], 1);

                memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
                       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

                amount -= sdp->sd_jbsize;
                data += sdp->sd_jbsize;

                brelse(bh[x]);
        }

out:
        kfree(bh);
        return error;

fail:
        gfs2_trans_end(sdp);
        kfree(bh);
        return error;
}

int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
                      struct iattr *attr, char *data)
{
        struct buffer_head *dibh;
        int error;

        if (GFS2_EA_IS_STUFFED(el->el_ea)) {
                error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
                if (error)
                        return error;

                gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
                memcpy(GFS2_EA2DATA(el->el_ea), data,
                       GFS2_EA_DATA_LEN(el->el_ea));
        } else
                error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);

        if (error)
                return error;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                error = inode_setattr(&ip->i_inode, attr);
                gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(GFS2_SB(&ip->i_inode));

        return error;
}

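/*
 * ea_dealloc_indirect() frees every EA block referenced by the indirect
 * block (the unstuffed data blocks were already freed by the
 * ea_dealloc_unstuffed() pass in gfs2_ea_dealloc()). It scans the pointer
 * table twice: first to build the resource-group list so all the needed
 * rgrp glocks can be acquired up front, then again inside the transaction
 * to do the actual freeing.
 */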
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrp_list rlist;
        struct buffer_head *indbh, *dibh;
        __be64 *eablk, *end;
        unsigned int rg_blocks = 0;
        u64 bstart = 0;
        unsigned int blen = 0;
        unsigned int blks = 0;
        unsigned int x;
        int error;

        memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

        error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh);
        if (error)
                return error;

        if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
                error = -EIO;
                goto out;
        }

        eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + sdp->sd_inptrs;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_rlist_add(sdp, &rlist, bstart);
                        bstart = bn;
                        blen = 1;
                }
                blks++;
        }
        if (bstart)
                gfs2_rlist_add(sdp, &rlist, bstart);
        else
                goto out;

        gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

        for (x = 0; x < rlist.rl_rgrps; x++) {
                struct gfs2_rgrpd *rgd;
                rgd = rlist.rl_ghs[x].gh_gl->gl_object;
                rg_blocks += rgd->rd_length;
        }

        error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
        if (error)
                goto out_rlist_free;

        error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
                                 RES_STATFS + RES_QUOTA, blks);
        if (error)
                goto out_gunlock;

        gfs2_trans_add_bh(ip->i_gl, indbh, 1);

        eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        bstart = 0;
        blen = 0;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_free_meta(ip, bstart, blen);
                        bstart = bn;
                        blen = 1;
                }

                *eablk = 0;
                gfs2_add_inode_blocks(&ip->i_inode, -1);
        }
        if (bstart)
                gfs2_free_meta(ip, bstart, blen);

        ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
        gfs2_rlist_free(&rlist);
out:
        brelse(indbh);
        return error;
}

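/*
 * ea_dealloc_block() frees the last remaining block (the lone EA block,
 * or the indirect block once ea_dealloc_indirect() has emptied it) and
 * clears ip->i_eattr on the dinode.
 */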
static int ea_dealloc_block(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_rgrpd *rgd;
        struct buffer_head *dibh;
        int error;

        rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr);
        if (!rgd) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
                                   &al->al_rgd_gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
                                 RES_QUOTA, 1);
        if (error)
                goto out_gunlock;

        gfs2_free_meta(ip, ip->i_eattr, 1);

        ip->i_eattr = 0;
        gfs2_add_inode_blocks(&ip->i_inode, -1);

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_uninit(&al->al_rgd_gh);
        return error;
}

/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */

int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
        struct gfs2_alloc *al;
        int error;

        al = gfs2_alloc_get(ip);
        if (!al)
                return -ENOMEM;

        error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
        if (error)
                goto out_alloc;

        error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
        if (error)
                goto out_quota;

        error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
        if (error)
                goto out_rindex;

        if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
                error = ea_dealloc_indirect(ip);
                if (error)
                        goto out_rindex;
        }

        error = ea_dealloc_block(ip);

out_rindex:
        gfs2_glock_dq_uninit(&al->al_ri_gh);
out_quota:
        gfs2_quota_unhold(ip);
out_alloc:
        gfs2_alloc_put(ip);
        return error;
}