eattr.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "eaops.h"
#include "eattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 * @sdp: the filesystem
 * @er: the extended attribute request
 * @size: the size of the stuffed or unstuffed record, in bytes
 *
 * Returns: 1 if the EA should be stuffed
 */
static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
			unsigned int *size)
{
	*size = GFS2_EAREQ_SIZE_STUFFED(er);
	if (*size <= sdp->sd_jbsize)
		return 1;

	*size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);
	return 0;
}

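/*
 * Sanity check an EA set request: the data must not exceed the maximum
 * EA data length and the resulting record must fit within one journaled
 * block.
 */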
static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
{
	unsigned int size;

	if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
		return -ERANGE;

	ea_calc_size(sdp, er, &size);

	/* This can only happen with 512 byte blocks */
	if (size > sdp->sd_jbsize)
		return -ERANGE;

	return 0;
}

typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev, void *private);

static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
						  bh->b_data + bh->b_size))
			goto fail;
		if (!GFS2_EATYPE_VALID(ea->ea_type))
			goto fail;

		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

fail:
	gfs2_consist_inode(ip);
	return -EIO;
}

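/*
 * Walk every EA header in the inode's EA block, or, if the inode uses
 * an indirect EA block, in each of the EA blocks it points to, calling
 * ea_call on each header found.
 */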
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	__be64 *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
	if (error)
		return error;

	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
out:
	brelse(bh);
	return error;
}

struct ea_find {
	struct gfs2_ea_request *ef_er;
	struct gfs2_ea_location *ef_el;
};

static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_find *ef = private;
	struct gfs2_ea_request *er = ef->ef_er;

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (ea->ea_type == er->er_type) {
		if (ea->ea_name_len == er->er_name_len &&
		    !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
			struct gfs2_ea_location *el = ef->ef_el;
			get_bh(bh);
			el->el_bh = bh;
			el->el_ea = ea;
			el->el_prev = prev;
			return 1;
		}
	}

	return 0;
}

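/*
 * Search the inode's EAs for one matching the type and name in @er.
 * On a hit the location is returned in @el with a reference held on
 * the buffer; el->el_ea is left NULL if no match was found.
 */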
int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		 struct gfs2_ea_location *el)
{
	struct ea_find ef;
	int error;

	ef.ef_er = er;
	ef.ef_el = el;

	memset(el, 0, sizeof(struct gfs2_ea_location));

	error = ea_foreach(ip, ea_find_i, &ef);
	if (error > 0)
		return 0;

	return error;
}

/**
 * ea_dealloc_unstuffed - free the data blocks belonging to an unstuffed EA
 * @ip: the inode
 * @bh: the buffer holding the EA block
 * @ea: the EA header
 * @prev: the previous EA header in the block, or NULL
 * @private: if non-NULL, leave the header in place, marked unused
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG. But watch, this may not always
 * be true.
 *
 * Returns: errno
 */
static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	int *leave = private;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	struct buffer_head *dibh;
	__be64 *dataptrs;
	u64 bn = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
				 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		if (!ip->i_di.di_blocks)
			gfs2_consist_inode(ip);
		ip->i_di.di_blocks--;
		gfs2_set_inode_blocks(&ip->i_inode);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	if (prev && !leave) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);
	return error;
}

static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
			       struct gfs2_ea_header *ea,
			       struct gfs2_ea_header *prev, int leave)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

	gfs2_glock_dq_uninit(&al->al_ri_gh);

out_quota:
	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_alloc_put(ip);
	return error;
}

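/*
 * State carried through ea_list_i() while listing EA names: ei_er is
 * the list request and ei_size counts the bytes written into er_data
 * so far.
 */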
struct ea_list {
	struct gfs2_ea_request *ei_er;
	unsigned int ei_size;
};

static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size = gfs2_ea_strlen(ea);

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (er->er_data_len) {
		char *prefix = NULL;
		unsigned int l = 0;
		char c = 0;

		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		switch (ea->ea_type) {
		case GFS2_EATYPE_USR:
			prefix = "user.";
			l = 5;
			break;
		case GFS2_EATYPE_SYS:
			prefix = "system.";
			l = 7;
			break;
		case GFS2_EATYPE_SECURITY:
			prefix = "security.";
			l = 9;
			break;
		}

		BUG_ON(l == 0);

		memcpy(er->er_data + ei->ei_size, prefix, l);
		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
		       ea->ea_name_len);
		memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
	}

	ei->ei_size += ea_size;

	return 0;
}

/**
 * gfs2_ea_list - list the names of all of an inode's extended attributes
 * @ip: the inode
 * @er: the request; names are copied into er_data when er_data_len is non-zero
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	if (ip->i_di.di_eattr) {
		struct ea_list ei = { .ei_er = er, .ei_size = 0 };

		error = ea_foreach(ip, ea_list_i, &ei);
		if (!error)
			error = ei.ei_size;
	}

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * ea_get_unstuffed - actually copies the unstuffed data into the
 *                    request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @data: The buffer the data is copied into
 *
 * Returns: errno
 */
static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			    char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
	if (!bh)
		return -ENOMEM;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;
}

int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		     char *data)
{
	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		memcpy(data, GFS2_EA2DATA(el->el_ea), GFS2_EA_DATA_LEN(el->el_ea));
		return 0;
	} else
		return ea_get_unstuffed(ip, el->el_ea, data);
}

/**
 * gfs2_ea_get_i - get the value of an extended attribute
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (er->er_data_len) {
		if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
			error = -ERANGE;
		else
			error = gfs2_ea_get_copy(ip, &el, er->er_data);
	}
	if (!error)
		error = GFS2_EA_DATA_LEN(el.el_ea);

	brelse(el.el_bh);

	return error;
}

/**
 * gfs2_ea_get - get the value of an extended attribute, taking the inode glock
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len ||
	    er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Returns: errno
 */
static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_ea_header *ea;
	u64 block;

	block = gfs2_alloc_block(ip);
	gfs2_trans_add_unrevoke(sdp, block, 1);
	*bhp = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

	ea = GFS2_EA_BH2FIRST(*bhp);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;
	ea->ea_num_ptrs = 0;

	ip->i_di.di_blocks++;
	gfs2_set_inode_blocks(&ip->i_inode);

	return 0;
}

/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * Returns: errno
 */
static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
		    struct gfs2_ea_request *er)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	ea->ea_data_len = cpu_to_be32(er->er_data_len);
	ea->ea_name_len = er->er_name_len;
	ea->ea_type = er->er_type;
	ea->__pad = 0;

	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
		ea->ea_num_ptrs = 0;
		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
	} else {
		__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
		const char *data = er->er_data;
		unsigned int data_len = er->er_data_len;
		unsigned int copy;
		unsigned int x;

		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
		for (x = 0; x < ea->ea_num_ptrs; x++) {
			struct buffer_head *bh;
			u64 block;
			int mh_size = sizeof(struct gfs2_meta_header);

			block = gfs2_alloc_block(ip);
			gfs2_trans_add_unrevoke(sdp, block, 1);
			bh = gfs2_meta_new(ip->i_gl, block);
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

			ip->i_di.di_blocks++;
			gfs2_set_inode_blocks(&ip->i_inode);

			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
							   data_len;
			memcpy(bh->b_data + mh_size, data, copy);
			if (copy < sdp->sd_jbsize)
				memset(bh->b_data + mh_size + copy, 0,
				       sdp->sd_jbsize - copy);

			*dataptr++ = cpu_to_be64(bh->b_blocknr);
			data += copy;
			data_len -= copy;

			brelse(bh);
		}

		gfs2_assert_withdraw(sdp, !data_len);
	}

	return 0;
}

typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
				   struct gfs2_ea_request *er, void *private);

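/*
 * Common framework for EA operations that need to allocate blocks:
 * take the quota locks, reserve @blks blocks, start a transaction,
 * call @skeleton_call to do the real work, then bring the dinode
 * up to date and release everything in reverse order.
 */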
static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			     unsigned int blks,
			     ea_skeleton_call_t skeleton_call, void *private)
{
	struct gfs2_alloc *al;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
	if (error)
		goto out_gunlock_q;

	al->al_requested = blks;

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
				 blks + al->al_rgd->rd_length +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = skeleton_call(ip, er, private);
	if (error)
		goto out_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		if (er->er_flags & GFS2_ERF_MODE) {
			gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
					     (ip->i_inode.i_mode & S_IFMT) ==
					     (er->er_mode & S_IFMT));
			ip->i_inode.i_mode = er->er_mode;
		}
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

out_end_trans:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}

static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		     void *private)
{
	struct buffer_head *bh;
	int error;

	error = ea_alloc_blk(ip, &bh);
	if (error)
		return error;

	ip->i_di.di_eattr = bh->b_blocknr;
	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

	brelse(bh);

	return error;
}

/**
 * ea_init - initializes a new eattr block
 * @ip: the inode
 * @er: the ea request
 *
 * Returns: errno
 */
static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
	unsigned int blks = 1;

	if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
}

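/*
 * Split the record holding @ea in two: trim ea_rec_len down to the
 * space @ea actually needs and return a header describing the
 * remainder, moving the LAST flag to the new header if @ea had it.
 */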
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
	u32 ea_size = GFS2_EA_SIZE(ea);
	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
							       ea_size);
	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
	int last = ea->ea_flags & GFS2_EAFLAG_LAST;

	ea->ea_rec_len = cpu_to_be32(ea_size);
	ea->ea_flags ^= last;

	new->ea_rec_len = cpu_to_be32(new_size);
	new->ea_flags = last;

	return new;
}

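/*
 * Once the new copy of an EA has been written, drop the old stuffed
 * copy: either mark it unused or merge its record into the previous
 * record in the block.
 */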
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	u32 len;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		return;
	} else if (GFS2_EA2NEXT(prev) != ea) {
		prev = GFS2_EA2NEXT(prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
	}

	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
	prev->ea_rec_len = cpu_to_be32(len);

	if (GFS2_EA_IS_LAST(ea))
		prev->ea_flags |= GFS2_EAFLAG_LAST;
}

struct ea_set {
	int ea_split;
	struct gfs2_ea_request *es_er;
	struct gfs2_ea_location *es_el;
	struct buffer_head *es_bh;
	struct gfs2_ea_header *es_ea;
};

static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
				 struct gfs2_ea_header *ea, struct ea_set *es)
{
	struct gfs2_ea_request *er = es->es_er;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	ea_write(ip, ea, er);

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (er->er_flags & GFS2_ERF_MODE) {
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
		ip->i_inode.i_mode = er->er_mode;
	}
	ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
out:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}

static int ea_set_simple_alloc(struct gfs2_inode *ip,
			       struct gfs2_ea_request *er, void *private)
{
	struct ea_set *es = private;
	struct gfs2_ea_header *ea = es->es_ea;
	int error;

	gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	error = ea_write(ip, ea, er);
	if (error)
		return error;

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	return 0;
}

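/*
 * ea_foreach callback used when setting an EA: try to reuse an unused
 * record, or the slack at the end of an existing record, for the new
 * EA. Returns 1 once the EA has been placed, 0 to keep searching.
 */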
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		if (!GFS2_EA_IS_STUFFED(ea)) {
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		unsigned int blks;

		es->es_bh = bh;
		es->es_ea = ea;
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					GFS2_SB(&ip->i_inode)->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}

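/*
 * No room was found in the existing EA blocks: allocate a fresh EA
 * block, converting the inode to an indirect EA layout (or using a
 * free slot in the existing indirect block) as needed, and write the
 * EA there.
 */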
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *indbh, *newbh;
	__be64 *eablk;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		__be64 *end;

		error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
				       &indbh);
		if (error)
			return error;

		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
			error = -EIO;
			goto out;
		}

		eablk = (__be64 *)(indbh->b_data + mh_size);
		end = eablk + sdp->sd_inptrs;

		for (; eablk < end; eablk++)
			if (!*eablk)
				break;

		if (eablk == end) {
			error = -ENOSPC;
			goto out;
		}

		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
	} else {
		u64 blk;

		blk = gfs2_alloc_block(ip);
		gfs2_trans_add_unrevoke(sdp, blk, 1);
		indbh = gfs2_meta_new(ip->i_gl, blk);
		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(indbh, mh_size);

		eablk = (__be64 *)(indbh->b_data + mh_size);
		*eablk = cpu_to_be64(ip->i_di.di_eattr);
		ip->i_di.di_eattr = blk;
		ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
		ip->i_di.di_blocks++;
		gfs2_set_inode_blocks(&ip->i_inode);

		eablk++;
	}

	error = ea_alloc_blk(ip, &newbh);
	if (error)
		goto out;

	*eablk = cpu_to_be64((u64)newbh->b_blocknr);
	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
	brelse(newbh);
	if (error)
		goto out;

	if (private)
		ea_set_remove_stuffed(ip, private);

out:
	brelse(indbh);
	return error;
}

static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		    struct gfs2_ea_location *el)
{
	struct ea_set es;
	unsigned int blks = 2;
	int error;

	memset(&es, 0, sizeof(struct ea_set));
	es.es_er = er;
	es.es_el = el;

	error = ea_foreach(ip, ea_set_simple, &es);
	if (error > 0)
		return 0;
	if (error)
		return error;

	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
		blks++;
	if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
}

static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
				   struct gfs2_ea_location *el)
{
	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
		el->el_prev = GFS2_EA2NEXT(el->el_prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
	}

	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}

int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr) {
		if (er->er_flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, er);
	}

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;

	if (el.el_ea) {
		if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		error = -EEXIST;
		if (!(er->er_flags & XATTR_CREATE)) {
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, er, &el);
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
	} else {
		error = -ENODATA;
		if (!(er->er_flags & XATTR_REPLACE))
			error = ea_set_i(ip, er, NULL);
	}

	return error;
}

int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}
	error = ea_check_size(GFS2_SB(&ip->i_inode), er);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(&ip->i_inode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (prev) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else
		ea->ea_type = GFS2_EATYPE_UNUSED;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}

int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (GFS2_EA_IS_STUFFED(el.el_ea))
		error = ea_remove_stuffed(ip, &el);
	else
		error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
					    0);

	brelse(el.el_bh);

	return error;
}

/**
 * gfs2_ea_remove - removes an extended attribute
 * @ip: pointer to the inode of the target file
 * @er: request information
 *
 * Returns: errno
 */
int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

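/*
 * Overwrite the data blocks of an unstuffed EA in place with @data;
 * used by gfs2_ea_acl_chmod() when the ACL being updated is stored
 * unstuffed.
 */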
static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_header *ea, char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
	if (!bh)
		return -ENOMEM;

	error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
	if (error)
		goto out;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto fail;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto fail;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto fail;
		}

		gfs2_trans_add_bh(ip->i_gl, bh[x], 1);

		memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;

fail:
	gfs2_trans_end(sdp);
	kfree(bh);
	return error;
}

int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		      struct iattr *attr, char *data)
{
	struct buffer_head *dibh;
	int error;

	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
		if (error)
			return error;

		gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
		memcpy(GFS2_EA2DATA(el->el_ea), data,
		       GFS2_EA_DATA_LEN(el->el_ea));
	} else
		error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);

	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}

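/*
 * Free every EA block referenced by the inode's indirect EA block and
 * clear the GFS2_DIF_EA_INDIRECT flag; the indirect block itself is
 * freed later by ea_dealloc_block().
 */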
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	struct buffer_head *indbh, *dibh;
	__be64 *eablk, *end;
	unsigned int rg_blocks = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &indbh);
	if (error)
		return error;

	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + sdp->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(sdp, &rlist, bstart);
			bstart = bn;
			blen = 1;
		}

		blks++;
	}
	if (bstart)
		gfs2_rlist_add(sdp, &rlist, bstart);
	else
		goto out;

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist_free;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
				 RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, indbh, 1);

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	bstart = 0;
	blen = 0;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*eablk = 0;
		if (!ip->i_di.di_blocks)
			gfs2_consist_inode(ip);
		ip->i_di.di_blocks--;
		gfs2_set_inode_blocks(&ip->i_inode);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
	gfs2_rlist_free(&rlist);
out:
	brelse(indbh);
	return error;
}

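/*
 * Free the block that di_eattr points to (the single EA block, or the
 * now-empty indirect block) and clear the inode's EA pointer.
 */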
static int ea_dealloc_block(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_rgrpd *rgd;
	struct buffer_head *dibh;
	int error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
				 RES_QUOTA, 1);
	if (error)
		goto out_gunlock;

	gfs2_free_meta(ip, ip->i_di.di_eattr, 1);

	ip->i_di.di_eattr = 0;
	if (!ip->i_di.di_blocks)
		gfs2_consist_inode(ip);
	ip->i_di.di_blocks--;
	gfs2_set_inode_blocks(&ip->i_inode);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
	return error;
}

/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */
int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
	if (error)
		goto out_rindex;

	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		error = ea_dealloc_indirect(ip);
		if (error)
			goto out_rindex;
	}

	error = ea_dealloc_block(ip);

out_rindex:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_quota:
	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_alloc_put(ip);
	return error;
}