eattr.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "eaops.h"
#include "eattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
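
/*
 * Extended attributes live in one or more EA blocks hung off the dinode
 * (di_eattr).  A small attribute is "stuffed": its name and data sit in
 * the EA block right after the gfs2_ea_header.  A large attribute is
 * "unstuffed": the header is followed by an array of block pointers to
 * separately allocated data blocks.  When a single EA block is not
 * enough, di_eattr points to an indirect block of EA block pointers and
 * GFS2_DIF_EA_INDIRECT is set in di_flags.
 */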

/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 * (not counting any unstuffed data blocks)
 * @sdp: the filesystem superblock
 * @er: the request being sized
 * @size: the number of bytes the EA will occupy in its EA block
 *
 * Returns: 1 if the EA should be stuffed, 0 if it should be unstuffed
 */
static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
			unsigned int *size)
{
	*size = GFS2_EAREQ_SIZE_STUFFED(er);
	if (*size <= sdp->sd_jbsize)
		return 1;

	*size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);

	return 0;
}

static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
{
	unsigned int size;

	if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
		return -ERANGE;

	ea_calc_size(sdp, er, &size);

	/* This can only happen with 512 byte blocks */
	if (size > sdp->sd_jbsize)
		return -ERANGE;

	return 0;
}
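
/*
 * ea_call_t callbacks return 0 to keep iterating, a positive value to
 * stop early (the value is passed back to the caller of ea_foreach),
 * or a negative errno on failure.
 */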
typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev, void *private);

static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
						  bh->b_data + bh->b_size))
			goto fail;
		if (!GFS2_EATYPE_VALID(ea->ea_type))
			goto fail;

		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

fail:
	gfs2_consist_inode(ip);
	return -EIO;
}

static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	__be64 *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
	if (error)
		return error;

	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
out:
	brelse(bh);
	return error;
}
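
/*
 * On a successful match, ea_find_i takes an extra reference on the EA
 * block's buffer head and records it in the gfs2_ea_location; callers of
 * gfs2_ea_find() are responsible for the matching brelse(el->el_bh).
 */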
struct ea_find {
	struct gfs2_ea_request *ef_er;
	struct gfs2_ea_location *ef_el;
};

static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_find *ef = private;
	struct gfs2_ea_request *er = ef->ef_er;

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (ea->ea_type == er->er_type) {
		if (ea->ea_name_len == er->er_name_len &&
		    !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
			struct gfs2_ea_location *el = ef->ef_el;
			get_bh(bh);
			el->el_bh = bh;
			el->el_ea = ea;
			el->el_prev = prev;
			return 1;
		}
	}

	return 0;
}

int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		 struct gfs2_ea_location *el)
{
	struct ea_find ef;
	int error;

	ef.ef_er = er;
	ef.ef_el = el;

	memset(el, 0, sizeof(struct gfs2_ea_location));

	error = ea_foreach(ip, ea_find_i, &ef);
	if (error > 0)
		return 0;

	return error;
}

/**
 * ea_dealloc_unstuffed - free the data blocks of an unstuffed EA
 * @ip: the inode
 * @bh: the buffer head of the EA block containing @ea
 * @ea: the EA header whose data blocks are being freed
 * @prev: the previous EA header in the block, if any
 * @private: if non-NULL, mark the EA unused rather than merging it into @prev
 *
 * This takes advantage of the fact that all of an EA's unstuffed data
 * blocks are allocated from the same resource group, which may not
 * always hold true.
 *
 * Returns: errno
 */
static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	int *leave = private;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	struct buffer_head *dibh;
	__be64 *dataptrs;
	u64 bn = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_ri.ri_length + RES_DINODE +
				 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		if (!ip->i_di.di_blocks)
			gfs2_consist_inode(ip);
		ip->i_di.di_blocks--;
		gfs2_set_inode_blocks(&ip->i_inode);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	if (prev && !leave) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME_SEC;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);
	return error;
}

static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
			       struct gfs2_ea_header *ea,
			       struct gfs2_ea_header *prev, int leave)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

	gfs2_glock_dq_uninit(&al->al_ri_gh);

out_quota:
	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_alloc_put(ip);
	return error;
}

struct ea_list {
	struct gfs2_ea_request *ei_er;
	unsigned int ei_size;
};
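
/*
 * ea_list_i emits each attribute into er_data as "<prefix><name>\0", where
 * the prefix is "user.", "system." or "security." depending on ea_type.
 * When the caller supplies no buffer (er_data_len == 0), only the total
 * size is accumulated in ei_size.
 */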
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size = gfs2_ea_strlen(ea);

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (er->er_data_len) {
		char *prefix = NULL;
		unsigned int l = 0;
		char c = 0;

		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		switch (ea->ea_type) {
		case GFS2_EATYPE_USR:
			prefix = "user.";
			l = 5;
			break;
		case GFS2_EATYPE_SYS:
			prefix = "system.";
			l = 7;
			break;
		case GFS2_EATYPE_SECURITY:
			prefix = "security.";
			l = 9;
			break;
		}

		BUG_ON(l == 0);

		memcpy(er->er_data + ei->ei_size, prefix, l);
		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
		       ea->ea_name_len);
		memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
	}

	ei->ei_size += ea_size;

	return 0;
}

/**
 * gfs2_ea_list - list the names of the extended attributes on an inode
 * @ip: the inode
 * @er: the request; er_data/er_data_len describe the caller's buffer
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	if (ip->i_di.di_eattr) {
		struct ea_list ei = { .ei_er = er, .ei_size = 0 };

		error = ea_foreach(ip, ea_list_i, &ei);
		if (!error)
			error = ei.ei_size;
	}

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * ea_get_unstuffed - actually copies the unstuffed data into the
 *                    request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @data: The buffer to copy the data into
 *
 * Returns: errno
 */
static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			    char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
	if (!bh)
		return -ENOMEM;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;
}

int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		     char *data)
{
	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		memcpy(data, GFS2_EA2DATA(el->el_ea), GFS2_EA_DATA_LEN(el->el_ea));
		return 0;
	} else
		return ea_get_unstuffed(ip, el->el_ea, data);
}

/**
 * gfs2_ea_get_i - copies the value of an extended attribute into a buffer
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (er->er_data_len) {
		if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
			error = -ERANGE;
		else
			error = gfs2_ea_get_copy(ip, &el, er->er_data);
	}
	if (!error)
		error = GFS2_EA_DATA_LEN(el.el_ea);

	brelse(el.el_bh);

	return error;
}

/**
 * gfs2_ea_get - get the value of an extended attribute
 * @ip: The GFS2 inode
 * @er: The request structure
 *
 * Returns: actual size of data on success, -errno on error
 */
int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len ||
	    er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Returns: errno
 */
static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_ea_header *ea;
	u64 block;

	block = gfs2_alloc_meta(ip);

	*bhp = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

	ea = GFS2_EA_BH2FIRST(*bhp);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;
	ea->ea_num_ptrs = 0;

	ip->i_di.di_blocks++;
	gfs2_set_inode_blocks(&ip->i_inode);

	return 0;
}

/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * Returns: errno
 */
static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
		    struct gfs2_ea_request *er)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	ea->ea_data_len = cpu_to_be32(er->er_data_len);
	ea->ea_name_len = er->er_name_len;
	ea->ea_type = er->er_type;
	ea->__pad = 0;

	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
		ea->ea_num_ptrs = 0;
		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
	} else {
		__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
		const char *data = er->er_data;
		unsigned int data_len = er->er_data_len;
		unsigned int copy;
		unsigned int x;

		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
		for (x = 0; x < ea->ea_num_ptrs; x++) {
			struct buffer_head *bh;
			u64 block;
			int mh_size = sizeof(struct gfs2_meta_header);

			block = gfs2_alloc_meta(ip);

			bh = gfs2_meta_new(ip->i_gl, block);
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

			ip->i_di.di_blocks++;
			gfs2_set_inode_blocks(&ip->i_inode);

			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
							   data_len;
			memcpy(bh->b_data + mh_size, data, copy);
			if (copy < sdp->sd_jbsize)
				memset(bh->b_data + mh_size + copy, 0,
				       sdp->sd_jbsize - copy);

			*dataptr++ = cpu_to_be64(bh->b_blocknr);
			data += copy;
			data_len -= copy;

			brelse(bh);
		}

		gfs2_assert_withdraw(sdp, !data_len);
	}

	return 0;
}

typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
				   struct gfs2_ea_request *er, void *private);
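
/*
 * ea_alloc_skeleton wraps the common allocation path: it checks and locks
 * the quota, reserves @blks blocks, opens a transaction, invokes the
 * skeleton_call to do the type-specific work, and finally writes the
 * updated dinode back before tearing everything down in reverse order.
 */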
static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			     unsigned int blks,
			     ea_skeleton_call_t skeleton_call, void *private)
{
	struct gfs2_alloc *al;
	struct buffer_head *dibh;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
	if (error)
		goto out_gunlock_q;

	al->al_requested = blks;

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
				 blks + al->al_rgd->rd_ri.ri_length +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = skeleton_call(ip, er, private);
	if (error)
		goto out_end_trans;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		if (er->er_flags & GFS2_ERF_MODE) {
			gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
					     (ip->i_inode.i_mode & S_IFMT) ==
					     (er->er_mode & S_IFMT));
			ip->i_inode.i_mode = er->er_mode;
		}
		ip->i_inode.i_ctime = CURRENT_TIME_SEC;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

out_end_trans:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}

static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		     void *private)
{
	struct buffer_head *bh;
	int error;

	error = ea_alloc_blk(ip, &bh);
	if (error)
		return error;

	ip->i_di.di_eattr = bh->b_blocknr;
	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

	brelse(bh);

	return error;
}

/**
 * ea_init - initializes a new eattr block
 * @ip: the inode
 * @er: the request being written into the new block
 *
 * Returns: errno
 */
static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
	unsigned int blks = 1;

	if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
}
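
/*
 * ea_split_ea carves the unused tail of an existing EA record into a new
 * record: the original record is shrunk to its in-use size and the new
 * record takes the remaining space, inheriting the GFS2_EAFLAG_LAST bit
 * if the original was the last record in the block.
 */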
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
	u32 ea_size = GFS2_EA_SIZE(ea);
	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
								ea_size);
	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
	int last = ea->ea_flags & GFS2_EAFLAG_LAST;

	ea->ea_rec_len = cpu_to_be32(ea_size);
	ea->ea_flags ^= last;

	new->ea_rec_len = cpu_to_be32(new_size);
	new->ea_flags = last;

	return new;
}

static void ea_set_remove_stuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	u32 len;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		return;
	} else if (GFS2_EA2NEXT(prev) != ea) {
		prev = GFS2_EA2NEXT(prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
	}

	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
	prev->ea_rec_len = cpu_to_be32(len);

	if (GFS2_EA_IS_LAST(ea))
		prev->ea_flags |= GFS2_EAFLAG_LAST;
}

struct ea_set {
	int ea_split;

	struct gfs2_ea_request *es_er;
	struct gfs2_ea_location *es_el;

	struct buffer_head *es_bh;
	struct gfs2_ea_header *es_ea;
};
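
/*
 * The ea_set_simple* helpers try to place the new EA inside an existing
 * EA block, either by reusing an unused record that is large enough or by
 * splitting the free tail of an in-use record.  ea_set_simple returns 1
 * once the attribute has been written, so ea_foreach stops iterating.
 */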
static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
				 struct gfs2_ea_header *ea, struct ea_set *es)
{
	struct gfs2_ea_request *er = es->es_er;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	ea_write(ip, ea, er);

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (er->er_flags & GFS2_ERF_MODE) {
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
			(ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
		ip->i_inode.i_mode = er->er_mode;
	}
	ip->i_inode.i_ctime = CURRENT_TIME_SEC;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
out:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}

static int ea_set_simple_alloc(struct gfs2_inode *ip,
			       struct gfs2_ea_request *er, void *private)
{
	struct ea_set *es = private;
	struct gfs2_ea_header *ea = es->es_ea;
	int error;

	gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	error = ea_write(ip, ea, er);
	if (error)
		return error;

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	return 0;
}

static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		if (!GFS2_EA_IS_STUFFED(ea)) {
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		unsigned int blks;

		es->es_bh = bh;
		es->es_ea = ea;
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					GFS2_SB(&ip->i_inode)->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}
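
/*
 * ea_set_block is the fallback when no existing EA block has room: it
 * either appends a pointer to a fresh EA block to the inode's existing
 * indirect EA block, or converts the inode to the indirect layout by
 * allocating an indirect block whose first pointer is the old EA block.
 */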
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *indbh, *newbh;
	__be64 *eablk;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		__be64 *end;

		error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
				       &indbh);
		if (error)
			return error;

		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
			error = -EIO;
			goto out;
		}

		eablk = (__be64 *)(indbh->b_data + mh_size);
		end = eablk + sdp->sd_inptrs;

		for (; eablk < end; eablk++)
			if (!*eablk)
				break;

		if (eablk == end) {
			error = -ENOSPC;
			goto out;
		}

		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
	} else {
		u64 blk;

		blk = gfs2_alloc_meta(ip);

		indbh = gfs2_meta_new(ip->i_gl, blk);
		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(indbh, mh_size);

		eablk = (__be64 *)(indbh->b_data + mh_size);
		*eablk = cpu_to_be64(ip->i_di.di_eattr);
		ip->i_di.di_eattr = blk;
		ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
		ip->i_di.di_blocks++;
		gfs2_set_inode_blocks(&ip->i_inode);

		eablk++;
	}

	error = ea_alloc_blk(ip, &newbh);
	if (error)
		goto out;

	*eablk = cpu_to_be64((u64)newbh->b_blocknr);
	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
	brelse(newbh);
	if (error)
		goto out;

	if (private)
		ea_set_remove_stuffed(ip, private);

out:
	brelse(indbh);
	return error;
}

static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
		    struct gfs2_ea_location *el)
{
	struct ea_set es;
	unsigned int blks = 2;
	int error;

	memset(&es, 0, sizeof(struct ea_set));
	es.es_er = er;
	es.es_el = el;

	error = ea_foreach(ip, ea_set_simple, &es);
	if (error > 0)
		return 0;
	if (error)
		return error;

	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
		blks++;
	if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
		blks += DIV_ROUND_UP(er->er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);

	return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
}

static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
				   struct gfs2_ea_location *el)
{
	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
		el->el_prev = GFS2_EA2NEXT(el->el_prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
	}

	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}

int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr) {
		if (er->er_flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, er);
	}

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;

	if (el.el_ea) {
		if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		error = -EEXIST;
		if (!(er->er_flags & XATTR_CREATE)) {
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, er, &el);
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
	} else {
		error = -ENODATA;
		if (!(er->er_flags & XATTR_REPLACE))
			error = ea_set_i(ip, er, NULL);
	}

	return error;
}
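
/*
 * Typical usage (illustrative sketch only, not taken from a specific
 * caller): a setxattr-style caller fills in a request along these lines
 * before calling gfs2_ea_set():
 *
 *	struct gfs2_ea_request er = {
 *		.er_type     = GFS2_EATYPE_USR,
 *		.er_name     = name,
 *		.er_name_len = strlen(name),
 *		.er_data     = (char *)value,
 *		.er_data_len = size,
 *		.er_flags    = flags,       (XATTR_CREATE or XATTR_REPLACE)
 *	};
 */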
int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;
	if (!er->er_data || !er->er_data_len) {
		er->er_data = NULL;
		er->er_data_len = 0;
	}
	error = ea_check_size(GFS2_SB(&ip->i_inode), er);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(&ip->i_inode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (prev) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else
		ea->ea_type = GFS2_EATYPE_UNUSED;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME_SEC;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}

int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_ea_location el;
	int error;

	if (!ip->i_di.di_eattr)
		return -ENODATA;

	error = gfs2_ea_find(ip, er, &el);
	if (error)
		return error;
	if (!el.el_ea)
		return -ENODATA;

	if (GFS2_EA_IS_STUFFED(el.el_ea))
		error = ea_remove_stuffed(ip, &el);
	else
		error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
					    0);

	brelse(el.el_bh);

	return error;
}

/**
 * gfs2_ea_remove - removes an extended attribute from a file
 * @ip: pointer to the inode of the target file
 * @er: request information
 *
 * Returns: errno
 */
int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct gfs2_holder i_gh;
	int error;

	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
		return -EINVAL;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		return error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		error = -EPERM;
	else
		error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}

static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_header *ea, char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
	if (!bh)
		return -ENOMEM;

	error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
	if (error)
		goto out;

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			while (x--)
				brelse(bh[x]);
			goto fail;
		}
		dataptrs++;
	}

	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto fail;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto fail;
		}

		gfs2_trans_add_bh(ip->i_gl, bh[x], 1);

		memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;

fail:
	gfs2_trans_end(sdp);
	kfree(bh);
	return error;
}

int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
		      struct iattr *attr, char *data)
{
	struct buffer_head *dibh;
	int error;

	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
		error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
		if (error)
			return error;

		gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
		memcpy(GFS2_EA2DATA(el->el_ea), data,
		       GFS2_EA_DATA_LEN(el->el_ea));
	} else
		error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);

	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}
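
/*
 * ea_dealloc_indirect frees the EA blocks referenced by the inode's
 * indirect EA block in two passes: the first pass collects the resource
 * groups involved (coalescing contiguous runs of blocks), and the second,
 * run under the rgrp glocks and a transaction, frees the blocks and
 * zeroes the pointers before clearing GFS2_DIF_EA_INDIRECT.
 */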
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	struct buffer_head *indbh, *dibh;
	__be64 *eablk, *end;
	unsigned int rg_blocks = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &indbh);
	if (error)
		return error;

	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + sdp->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(sdp, &rlist, bstart);
			bstart = bn;
			blen = 1;
		}

		blks++;
	}
	if (bstart)
		gfs2_rlist_add(sdp, &rlist, bstart);
	else
		goto out;

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_ri.ri_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist_free;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
				 RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, indbh, 1);

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	bstart = 0;
	blen = 0;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*eablk = 0;
		if (!ip->i_di.di_blocks)
			gfs2_consist_inode(ip);
		ip->i_di.di_blocks--;
		gfs2_set_inode_blocks(&ip->i_inode);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
	gfs2_rlist_free(&rlist);
out:
	brelse(indbh);
	return error;
}

static int ea_dealloc_block(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd;
	struct buffer_head *dibh;
	int error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
				 RES_QUOTA, 1);
	if (error)
		goto out_gunlock;

	gfs2_free_meta(ip, ip->i_di.di_eattr, 1);

	ip->i_di.di_eattr = 0;
	if (!ip->i_di.di_blocks)
		gfs2_consist_inode(ip);
	ip->i_di.di_blocks--;
	gfs2_set_inode_blocks(&ip->i_inode);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
	return error;
}

/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */
int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
	if (error)
		goto out_quota;

	error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
	if (error)
		goto out_rindex;

	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		error = ea_dealloc_indirect(ip);
		if (error)
			goto out_rindex;
	}

	error = ea_dealloc_block(ip);

out_rindex:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_quota:
	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_alloc_put(ip);

	return error;
}