xfs_qm.c

  1. /*
  2. * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "xfs.h"
  19. #include "xfs_fs.h"
  20. #include "xfs_format.h"
  21. #include "xfs_bit.h"
  22. #include "xfs_log.h"
  23. #include "xfs_trans.h"
  24. #include "xfs_sb.h"
  25. #include "xfs_ag.h"
  26. #include "xfs_alloc.h"
  27. #include "xfs_quota.h"
  28. #include "xfs_mount.h"
  29. #include "xfs_bmap_btree.h"
  30. #include "xfs_ialloc_btree.h"
  31. #include "xfs_dinode.h"
  32. #include "xfs_inode.h"
  33. #include "xfs_ialloc.h"
  34. #include "xfs_itable.h"
  35. #include "xfs_rtalloc.h"
  36. #include "xfs_error.h"
  37. #include "xfs_bmap.h"
  38. #include "xfs_attr.h"
  39. #include "xfs_buf_item.h"
  40. #include "xfs_trans_space.h"
  41. #include "xfs_qm.h"
  42. #include "xfs_trace.h"
  43. #include "xfs_icache.h"
  44. #include "xfs_cksum.h"
  45. /*
  46. * The global quota manager. There is only one of these for the entire
  47. * system, _not_ one per file system. XQM keeps track of the overall
  48. * quota functionality, including maintaining the freelist and hash
  49. * tables of dquots.
  50. */
  51. STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
  52. STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
  53. STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
  54. /*
  55. * We use the batch lookup interface to iterate over the dquots as it
  56. * currently is the only interface into the radix tree code that allows
  57. * fuzzy lookups instead of exact matches. Holding the lock over multiple
  58. * operations is fine as all callers are used either during mount/umount
  59. * or quotaoff.
  60. */
  61. #define XFS_DQ_LOOKUP_BATCH 32
  62. STATIC int
  63. xfs_qm_dquot_walk(
  64. struct xfs_mount *mp,
  65. int type,
  66. int (*execute)(struct xfs_dquot *dqp, void *data),
  67. void *data)
  68. {
  69. struct xfs_quotainfo *qi = mp->m_quotainfo;
  70. struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
  71. uint32_t next_index;
  72. int last_error = 0;
  73. int skipped;
  74. int nr_found;
  75. restart:
  76. skipped = 0;
  77. next_index = 0;
  78. nr_found = 0;
  79. while (1) {
  80. struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
  81. int error = 0;
  82. int i;
  83. mutex_lock(&qi->qi_tree_lock);
  84. nr_found = radix_tree_gang_lookup(tree, (void **)batch,
  85. next_index, XFS_DQ_LOOKUP_BATCH);
  86. if (!nr_found) {
  87. mutex_unlock(&qi->qi_tree_lock);
  88. break;
  89. }
  90. for (i = 0; i < nr_found; i++) {
  91. struct xfs_dquot *dqp = batch[i];
  92. next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
  93. error = execute(batch[i], data);
  94. if (error == EAGAIN) {
  95. skipped++;
  96. continue;
  97. }
  98. if (error && last_error != EFSCORRUPTED)
  99. last_error = error;
  100. }
  101. mutex_unlock(&qi->qi_tree_lock);
  102. /* bail out if the filesystem is corrupted. */
  103. if (last_error == EFSCORRUPTED) {
  104. skipped = 0;
  105. break;
  106. }
  107. }
  108. if (skipped) {
  109. delay(1);
  110. goto restart;
  111. }
  112. return last_error;
  113. }
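/*
 * A minimal sketch of a walk callback, assuming the same conventions used by
 * xfs_qm_dqpurge() and xfs_qm_flush_one() below (the name xfs_qm_example_one
 * is hypothetical): return 0 on success, EAGAIN to have the walk retry this
 * dquot on a later pass, or any other errno to be recorded as the walk's
 * return value.
 *
 *	STATIC int
 *	xfs_qm_example_one(
 *		struct xfs_dquot	*dqp,
 *		void			*data)
 *	{
 *		if (!xfs_dqlock_nowait(dqp))
 *			return EAGAIN;
 *		(inspect or modify the locked dquot here)
 *		xfs_dqunlock(dqp);
 *		return 0;
 *	}
 */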
  114. /*
  115. * Purge a dquot from all tracking data structures and free it.
  116. */
  117. STATIC int
  118. xfs_qm_dqpurge(
  119. struct xfs_dquot *dqp,
  120. void *data)
  121. {
  122. struct xfs_mount *mp = dqp->q_mount;
  123. struct xfs_quotainfo *qi = mp->m_quotainfo;
  124. struct xfs_dquot *gdqp = NULL;
  125. struct xfs_dquot *pdqp = NULL;
  126. xfs_dqlock(dqp);
  127. if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
  128. xfs_dqunlock(dqp);
  129. return EAGAIN;
  130. }
  131. /*
  132. * If this quota has a hint attached, prepare for releasing it now.
  133. */
  134. gdqp = dqp->q_gdquot;
  135. if (gdqp) {
  136. xfs_dqlock(gdqp);
  137. dqp->q_gdquot = NULL;
  138. }
  139. pdqp = dqp->q_pdquot;
  140. if (pdqp) {
  141. xfs_dqlock(pdqp);
  142. dqp->q_pdquot = NULL;
  143. }
  144. dqp->dq_flags |= XFS_DQ_FREEING;
  145. xfs_dqflock(dqp);
  146. /*
  147. * If we are turning this type of quotas off, we don't care
  148. * about the dirty metadata sitting in this dquot. OTOH, if
  149. * we're unmounting, we do care, so we flush it and wait.
  150. */
  151. if (XFS_DQ_IS_DIRTY(dqp)) {
  152. struct xfs_buf *bp = NULL;
  153. int error;
  154. /*
  155. * We don't care about getting disk errors here. We need
  156. * to purge this dquot anyway, so we go ahead regardless.
  157. */
  158. error = xfs_qm_dqflush(dqp, &bp);
  159. if (error) {
  160. xfs_warn(mp, "%s: dquot %p flush failed",
  161. __func__, dqp);
  162. } else {
  163. error = xfs_bwrite(bp);
  164. xfs_buf_relse(bp);
  165. }
  166. xfs_dqflock(dqp);
  167. }
  168. ASSERT(atomic_read(&dqp->q_pincount) == 0);
  169. ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
  170. !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
  171. xfs_dqfunlock(dqp);
  172. xfs_dqunlock(dqp);
  173. radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
  174. be32_to_cpu(dqp->q_core.d_id));
  175. qi->qi_dquots--;
  176. /*
  177. * We move dquots to the freelist as soon as their reference count
  178. * hits zero, so it really should be on the freelist here.
  179. */
  180. ASSERT(!list_empty(&dqp->q_lru));
  181. list_lru_del(&qi->qi_lru, &dqp->q_lru);
  182. XFS_STATS_DEC(xs_qm_dquot_unused);
  183. xfs_qm_dqdestroy(dqp);
  184. if (gdqp)
  185. xfs_qm_dqput(gdqp);
  186. if (pdqp)
  187. xfs_qm_dqput(pdqp);
  188. return 0;
  189. }
  190. /*
  191. * Purge the dquot cache.
  192. */
  193. void
  194. xfs_qm_dqpurge_all(
  195. struct xfs_mount *mp,
  196. uint flags)
  197. {
  198. if (flags & XFS_QMOPT_UQUOTA)
  199. xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
  200. if (flags & XFS_QMOPT_GQUOTA)
  201. xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
  202. if (flags & XFS_QMOPT_PQUOTA)
  203. xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
  204. }
  205. /*
  206. * Just destroy the quotainfo structure.
  207. */
  208. void
  209. xfs_qm_unmount(
  210. struct xfs_mount *mp)
  211. {
  212. if (mp->m_quotainfo) {
  213. xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
  214. xfs_qm_destroy_quotainfo(mp);
  215. }
  216. }
  217. /*
  218. * This is called from xfs_mountfs to start quotas and initialize all
  219. * necessary data structures like quotainfo. This is also responsible for
  220. * running a quotacheck as necessary. We are guaranteed that the superblock
  221. * is consistently read in at this point.
  222. *
  223. * If we fail here, the mount will continue with quota turned off. We don't
  224. * need to indicate success or failure at all.
  225. */
  226. void
  227. xfs_qm_mount_quotas(
  228. xfs_mount_t *mp)
  229. {
  230. int error = 0;
  231. uint sbf;
  232. /*
  233. * If quotas on realtime volumes is not supported, we disable
  234. * quotas immediately.
  235. */
  236. if (mp->m_sb.sb_rextents) {
  237. xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
  238. mp->m_qflags = 0;
  239. goto write_changes;
  240. }
  241. ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  242. /*
  243. * Allocate the quotainfo structure inside the mount struct, and
  244. * create quotainode(s), and change/rev superblock if necessary.
  245. */
  246. error = xfs_qm_init_quotainfo(mp);
  247. if (error) {
  248. /*
  249. * We must turn off quotas.
  250. */
  251. ASSERT(mp->m_quotainfo == NULL);
  252. mp->m_qflags = 0;
  253. goto write_changes;
  254. }
  255. /*
  256. * If any of the quotas are not consistent, do a quotacheck.
  257. */
  258. if (XFS_QM_NEED_QUOTACHECK(mp)) {
  259. error = xfs_qm_quotacheck(mp);
  260. if (error) {
  261. /* Quotacheck failed and disabled quotas. */
  262. return;
  263. }
  264. }
  265. /*
  266. * If one type of quotas is off, then it will lose its
  267. * quotachecked status, since we won't be doing accounting for
  268. * that type anymore.
  269. */
  270. if (!XFS_IS_UQUOTA_ON(mp))
  271. mp->m_qflags &= ~XFS_UQUOTA_CHKD;
  272. if (!XFS_IS_GQUOTA_ON(mp))
  273. mp->m_qflags &= ~XFS_GQUOTA_CHKD;
  274. if (!XFS_IS_PQUOTA_ON(mp))
  275. mp->m_qflags &= ~XFS_PQUOTA_CHKD;
  276. write_changes:
  277. /*
  278. * We actually don't have to acquire the m_sb_lock at all.
  279. * This can only be called from mount, and that's single threaded. XXX
  280. */
  281. spin_lock(&mp->m_sb_lock);
  282. sbf = mp->m_sb.sb_qflags;
  283. mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
  284. spin_unlock(&mp->m_sb_lock);
  285. if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
  286. if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
  287. /*
  288. * We could only have been turning quotas off.
  289. * We aren't in very good shape actually because
  290. * the incore structures are convinced that quotas are
  291. * off, but the on disk superblock doesn't know that !
  292. */
  293. ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
  294. xfs_alert(mp, "%s: Superblock update failed!",
  295. __func__);
  296. }
  297. }
  298. if (error) {
  299. xfs_warn(mp, "Failed to initialize disk quotas.");
  300. return;
  301. }
  302. }
  303. /*
  304. * Called from the vfsops layer.
  305. */
  306. void
  307. xfs_qm_unmount_quotas(
  308. xfs_mount_t *mp)
  309. {
  310. /*
  311. * Release the dquots that the root inode, et al. might be holding,
  312. * before we flush quotas and blow away the quotainfo structure.
  313. */
  314. ASSERT(mp->m_rootip);
  315. xfs_qm_dqdetach(mp->m_rootip);
  316. if (mp->m_rbmip)
  317. xfs_qm_dqdetach(mp->m_rbmip);
  318. if (mp->m_rsumip)
  319. xfs_qm_dqdetach(mp->m_rsumip);
  320. /*
  321. * Release the quota inodes.
  322. */
  323. if (mp->m_quotainfo) {
  324. if (mp->m_quotainfo->qi_uquotaip) {
  325. IRELE(mp->m_quotainfo->qi_uquotaip);
  326. mp->m_quotainfo->qi_uquotaip = NULL;
  327. }
  328. if (mp->m_quotainfo->qi_gquotaip) {
  329. IRELE(mp->m_quotainfo->qi_gquotaip);
  330. mp->m_quotainfo->qi_gquotaip = NULL;
  331. }
  332. if (mp->m_quotainfo->qi_pquotaip) {
  333. IRELE(mp->m_quotainfo->qi_pquotaip);
  334. mp->m_quotainfo->qi_pquotaip = NULL;
  335. }
  336. }
  337. }
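/*
 * Attach one dquot of the given type to the inode: use the pointer already
 * cached in the inode if present, then try the group/project hint hanging
 * off the user dquot, and only fall back to a full xfs_qm_dqget() lookup
 * if neither shortcut applies.
 */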
  338. STATIC int
  339. xfs_qm_dqattach_one(
  340. xfs_inode_t *ip,
  341. xfs_dqid_t id,
  342. uint type,
  343. uint doalloc,
  344. xfs_dquot_t *udqhint, /* hint */
  345. xfs_dquot_t **IO_idqpp)
  346. {
  347. xfs_dquot_t *dqp;
  348. int error;
  349. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  350. error = 0;
  351. /*
  352. * See if we already have it in the inode itself. IO_idqpp is
  353. * &i_udquot or &i_gdquot. This made the code look weird, but
  354. * made the logic a lot simpler.
  355. */
  356. dqp = *IO_idqpp;
  357. if (dqp) {
  358. trace_xfs_dqattach_found(dqp);
  359. return 0;
  360. }
  361. /*
  362. * udqhint is the i_udquot field in inode, and is non-NULL only
  363. * when the type arg is group/project. Its purpose is to save a
  364. * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
  365. * the user dquot.
  366. */
  367. if (udqhint) {
  368. ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
  369. xfs_dqlock(udqhint);
  370. /*
  371. * No need to take dqlock to look at the id.
  372. *
  373. * The ID can't change until it gets reclaimed, and it won't
  374. * be reclaimed as long as we have a ref from inode and we
  375. * hold the ilock.
  376. */
  377. if (type == XFS_DQ_GROUP)
  378. dqp = udqhint->q_gdquot;
  379. else
  380. dqp = udqhint->q_pdquot;
  381. if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
  382. ASSERT(*IO_idqpp == NULL);
  383. *IO_idqpp = xfs_qm_dqhold(dqp);
  384. xfs_dqunlock(udqhint);
  385. return 0;
  386. }
  387. /*
  388. * We can't hold a dquot lock when we call the dqget code.
  389. * We'll deadlock in no time because it would violate the
  390. * lock ordering - the inode lock comes before any dquot lock,
  391. * and we may drop and reacquire the ilock in xfs_qm_dqget().
  392. */
  393. xfs_dqunlock(udqhint);
  394. }
  395. /*
  396. * Find the dquot from somewhere. This bumps the
  397. * reference count of dquot and returns it locked.
  398. * This can return ENOENT if dquot didn't exist on
  399. * disk and we didn't ask it to allocate;
  400. * ESRCH if quotas got turned off suddenly.
  401. */
  402. error = xfs_qm_dqget(ip->i_mount, ip, id, type,
  403. doalloc | XFS_QMOPT_DOWARN, &dqp);
  404. if (error)
  405. return error;
  406. trace_xfs_dqattach_get(dqp);
  407. /*
  408. * dqget may have dropped and re-acquired the ilock, but it guarantees
  409. * that the dquot returned is the one that should go in the inode.
  410. */
  411. *IO_idqpp = dqp;
  412. xfs_dqunlock(dqp);
  413. return 0;
  414. }
  415. /*
  416. * Given a udquot and group/project type, attach the group/project
  417. * dquot pointer to the udquot as a hint for future lookups.
  418. */
  419. STATIC void
  420. xfs_qm_dqattach_hint(
  421. struct xfs_inode *ip,
  422. int type)
  423. {
  424. struct xfs_dquot **dqhintp;
  425. struct xfs_dquot *dqp;
  426. struct xfs_dquot *udq = ip->i_udquot;
  427. ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
  428. xfs_dqlock(udq);
  429. if (type == XFS_DQ_GROUP) {
  430. dqp = ip->i_gdquot;
  431. dqhintp = &udq->q_gdquot;
  432. } else {
  433. dqp = ip->i_pdquot;
  434. dqhintp = &udq->q_pdquot;
  435. }
  436. if (*dqhintp) {
  437. struct xfs_dquot *tmp;
  438. if (*dqhintp == dqp)
  439. goto done;
  440. tmp = *dqhintp;
  441. *dqhintp = NULL;
  442. xfs_qm_dqrele(tmp);
  443. }
  444. *dqhintp = xfs_qm_dqhold(dqp);
  445. done:
  446. xfs_dqunlock(udq);
  447. }
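/*
 * Quick check of whether dquots need to be attached to this inode at all:
 * quotas must be running and enabled, the inode must not already have all
 * applicable dquots attached, and it must not be one of the quota inodes
 * themselves.
 */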
  448. static bool
  449. xfs_qm_need_dqattach(
  450. struct xfs_inode *ip)
  451. {
  452. struct xfs_mount *mp = ip->i_mount;
  453. if (!XFS_IS_QUOTA_RUNNING(mp))
  454. return false;
  455. if (!XFS_IS_QUOTA_ON(mp))
  456. return false;
  457. if (!XFS_NOT_DQATTACHED(mp, ip))
  458. return false;
  459. if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
  460. return false;
  461. return true;
  462. }
  463. /*
  464. * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
  465. * into account.
  466. * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
  467. * Inode may get unlocked and relocked in here, and the caller must deal with
  468. * the consequences.
  469. */
  470. int
  471. xfs_qm_dqattach_locked(
  472. xfs_inode_t *ip,
  473. uint flags)
  474. {
  475. xfs_mount_t *mp = ip->i_mount;
  476. uint nquotas = 0;
  477. int error = 0;
  478. if (!xfs_qm_need_dqattach(ip))
  479. return 0;
  480. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  481. if (XFS_IS_UQUOTA_ON(mp)) {
  482. error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
  483. flags & XFS_QMOPT_DQALLOC,
  484. NULL, &ip->i_udquot);
  485. if (error)
  486. goto done;
  487. nquotas++;
  488. }
  489. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  490. if (XFS_IS_GQUOTA_ON(mp)) {
  491. error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
  492. flags & XFS_QMOPT_DQALLOC,
  493. ip->i_udquot, &ip->i_gdquot);
  494. /*
  495. * Don't worry about the udquot that we may have
  496. * attached above. It'll get detached, if not already.
  497. */
  498. if (error)
  499. goto done;
  500. nquotas++;
  501. }
  502. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  503. if (XFS_IS_PQUOTA_ON(mp)) {
  504. error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
  505. flags & XFS_QMOPT_DQALLOC,
  506. ip->i_udquot, &ip->i_pdquot);
  507. /*
  508. * Don't worry about the udquot that we may have
  509. * attached above. It'll get detached, if not already.
  510. */
  511. if (error)
  512. goto done;
  513. nquotas++;
  514. }
  515. /*
  516. * Attach this group/project quota to the user quota as a hint.
  517. * This WON'T, in general, result in a thrash.
  518. */
  519. if (nquotas > 1 && ip->i_udquot) {
  520. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  521. ASSERT(ip->i_gdquot || !XFS_IS_GQUOTA_ON(mp));
  522. ASSERT(ip->i_pdquot || !XFS_IS_PQUOTA_ON(mp));
  523. /*
  524. * We do not have i_udquot locked at this point, but this check
  525. * is OK since we don't depend on the i_gdquot to be accurate
  526. * 100% all the time. It is just a hint, and this will
  527. * succeed in general.
  528. */
  529. if (ip->i_udquot->q_gdquot != ip->i_gdquot)
  530. xfs_qm_dqattach_hint(ip, XFS_DQ_GROUP);
  531. if (ip->i_udquot->q_pdquot != ip->i_pdquot)
  532. xfs_qm_dqattach_hint(ip, XFS_DQ_PROJ);
  533. }
  534. done:
  535. #ifdef DEBUG
  536. if (!error) {
  537. if (XFS_IS_UQUOTA_ON(mp))
  538. ASSERT(ip->i_udquot);
  539. if (XFS_IS_GQUOTA_ON(mp))
  540. ASSERT(ip->i_gdquot);
  541. if (XFS_IS_PQUOTA_ON(mp))
  542. ASSERT(ip->i_pdquot);
  543. }
  544. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  545. #endif
  546. return error;
  547. }
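/*
 * Convenience wrapper: check whether anything needs to be attached, then
 * take the inode's ILOCK exclusively around xfs_qm_dqattach_locked().
 */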
  548. int
  549. xfs_qm_dqattach(
  550. struct xfs_inode *ip,
  551. uint flags)
  552. {
  553. int error;
  554. if (!xfs_qm_need_dqattach(ip))
  555. return 0;
  556. xfs_ilock(ip, XFS_ILOCK_EXCL);
  557. error = xfs_qm_dqattach_locked(ip, flags);
  558. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  559. return error;
  560. }
  561. /*
  562. * Release dquots (and their references) if any.
  563. * The inode should be locked EXCL except when this is called by
  564. * xfs_ireclaim.
  565. */
  566. void
  567. xfs_qm_dqdetach(
  568. xfs_inode_t *ip)
  569. {
  570. if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
  571. return;
  572. trace_xfs_dquot_dqdetach(ip);
  573. ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
  574. if (ip->i_udquot) {
  575. xfs_qm_dqrele(ip->i_udquot);
  576. ip->i_udquot = NULL;
  577. }
  578. if (ip->i_gdquot) {
  579. xfs_qm_dqrele(ip->i_gdquot);
  580. ip->i_gdquot = NULL;
  581. }
  582. if (ip->i_pdquot) {
  583. xfs_qm_dqrele(ip->i_pdquot);
  584. ip->i_pdquot = NULL;
  585. }
  586. }
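/*
 * Work out how many on-disk dquot records fit in a dquot cluster of the
 * given size in basic (512 byte) blocks. For example, a 4 KiB cluster is
 * eight basic blocks, so BBTOB(8) = 4096 bytes divided by the size of one
 * xfs_dqblk_t gives the number of dquots per chunk.
 */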
  587. int
  588. xfs_qm_calc_dquots_per_chunk(
  589. struct xfs_mount *mp,
  590. unsigned int nbblks) /* basic block units */
  591. {
  592. unsigned int ndquots;
  593. ASSERT(nbblks > 0);
  594. ndquots = BBTOB(nbblks);
  595. do_div(ndquots, sizeof(xfs_dqblk_t));
  596. return ndquots;
  597. }
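/*
 * State shared by the dquot shrinker: "buffers" collects dirty dquot
 * buffers for a single delayed-write submission, "dispose" collects dquots
 * that have been isolated from the LRU and can be freed once the walk is
 * done.
 */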
  598. struct xfs_qm_isolate {
  599. struct list_head buffers;
  600. struct list_head dispose;
  601. };
  602. static enum lru_status
  603. xfs_qm_dquot_isolate(
  604. struct list_head *item,
  605. spinlock_t *lru_lock,
  606. void *arg)
  607. {
  608. struct xfs_dquot *dqp = container_of(item,
  609. struct xfs_dquot, q_lru);
  610. struct xfs_qm_isolate *isol = arg;
  611. if (!xfs_dqlock_nowait(dqp))
  612. goto out_miss_busy;
  613. /*
  614. * This dquot has acquired a reference in the meantime; remove it from
  615. * the freelist and try again.
  616. */
  617. if (dqp->q_nrefs) {
  618. xfs_dqunlock(dqp);
  619. XFS_STATS_INC(xs_qm_dqwants);
  620. trace_xfs_dqreclaim_want(dqp);
  621. list_del_init(&dqp->q_lru);
  622. XFS_STATS_DEC(xs_qm_dquot_unused);
  623. return LRU_REMOVED;
  624. }
  625. /*
  626. * If the dquot is dirty, flush it. If it's already being flushed, just
  627. * skip it so there is time for the IO to complete before we try to
  628. * reclaim it again on the next LRU pass.
  629. */
  630. if (!xfs_dqflock_nowait(dqp)) {
  631. xfs_dqunlock(dqp);
  632. goto out_miss_busy;
  633. }
  634. if (XFS_DQ_IS_DIRTY(dqp)) {
  635. struct xfs_buf *bp = NULL;
  636. int error;
  637. trace_xfs_dqreclaim_dirty(dqp);
  638. /* we have to drop the LRU lock to flush the dquot */
  639. spin_unlock(lru_lock);
  640. error = xfs_qm_dqflush(dqp, &bp);
  641. if (error) {
  642. xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
  643. __func__, dqp);
  644. goto out_unlock_dirty;
  645. }
  646. xfs_buf_delwri_queue(bp, &isol->buffers);
  647. xfs_buf_relse(bp);
  648. goto out_unlock_dirty;
  649. }
  650. xfs_dqfunlock(dqp);
  651. /*
  652. * Prevent lookups now that we are past the point of no return.
  653. */
  654. dqp->dq_flags |= XFS_DQ_FREEING;
  655. xfs_dqunlock(dqp);
  656. ASSERT(dqp->q_nrefs == 0);
  657. list_move_tail(&dqp->q_lru, &isol->dispose);
  658. XFS_STATS_DEC(xs_qm_dquot_unused);
  659. trace_xfs_dqreclaim_done(dqp);
  660. XFS_STATS_INC(xs_qm_dqreclaims);
  661. return LRU_REMOVED;
  662. out_miss_busy:
  663. trace_xfs_dqreclaim_busy(dqp);
  664. XFS_STATS_INC(xs_qm_dqreclaim_misses);
  665. return LRU_SKIP;
  666. out_unlock_dirty:
  667. trace_xfs_dqreclaim_busy(dqp);
  668. XFS_STATS_INC(xs_qm_dqreclaim_misses);
  669. xfs_dqunlock(dqp);
  670. spin_lock(lru_lock);
  671. return LRU_RETRY;
  672. }
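/*
 * Shrinker scan callback: walk the per-node dquot LRU, isolate reclaimable
 * dquots, submit any dirty buffers queued by the isolate callback, and then
 * free everything that ended up on the dispose list.
 */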
  673. static unsigned long
  674. xfs_qm_shrink_scan(
  675. struct shrinker *shrink,
  676. struct shrink_control *sc)
  677. {
  678. struct xfs_quotainfo *qi = container_of(shrink,
  679. struct xfs_quotainfo, qi_shrinker);
  680. struct xfs_qm_isolate isol;
  681. unsigned long freed;
  682. int error;
  683. unsigned long nr_to_scan = sc->nr_to_scan;
  684. if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
  685. return 0;
  686. INIT_LIST_HEAD(&isol.buffers);
  687. INIT_LIST_HEAD(&isol.dispose);
  688. freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate, &isol,
  689. &nr_to_scan);
  690. error = xfs_buf_delwri_submit(&isol.buffers);
  691. if (error)
  692. xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
  693. while (!list_empty(&isol.dispose)) {
  694. struct xfs_dquot *dqp;
  695. dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
  696. list_del_init(&dqp->q_lru);
  697. xfs_qm_dqfree_one(dqp);
  698. }
  699. return freed;
  700. }
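/*
 * Shrinker count callback: report how many dquots sit on this node's LRU.
 */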
  701. static unsigned long
  702. xfs_qm_shrink_count(
  703. struct shrinker *shrink,
  704. struct shrink_control *sc)
  705. {
  706. struct xfs_quotainfo *qi = container_of(shrink,
  707. struct xfs_quotainfo, qi_shrinker);
  708. return list_lru_count_node(&qi->qi_lru, sc->nid);
  709. }
  710. /*
  711. * This initializes all the quota information that's kept in the
  712. * mount structure
  713. */
  714. STATIC int
  715. xfs_qm_init_quotainfo(
  716. xfs_mount_t *mp)
  717. {
  718. xfs_quotainfo_t *qinf;
  719. int error;
  720. xfs_dquot_t *dqp;
  721. ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  722. qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
  723. /*
  724. * See if quotainodes are setup, and if not, allocate them,
  725. * and change the superblock accordingly.
  726. */
  727. if ((error = xfs_qm_init_quotainos(mp))) {
  728. kmem_free(qinf);
  729. mp->m_quotainfo = NULL;
  730. return error;
  731. }
  732. INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
  733. INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
  734. INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
  735. mutex_init(&qinf->qi_tree_lock);
  736. list_lru_init(&qinf->qi_lru);
  737. /* mutex used to serialize quotaoffs */
  738. mutex_init(&qinf->qi_quotaofflock);
  739. /* Precalc some constants */
  740. qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
  741. qinf->qi_dqperchunk = xfs_qm_calc_dquots_per_chunk(mp,
  742. qinf->qi_dqchunklen);
  743. mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
  744. /*
  745. * We try to get the limits from the superuser's limits fields.
  746. * This is quite hacky, but it is standard quota practice.
  747. *
  748. * We look at the USR dquot with id == 0 first, but if user quotas
  749. * are not enabled, we go to the GRP dquot with id == 0.
  750. * We don't really care to keep separate default limits for user
  751. * and group quotas, at least not at this point.
  752. *
  753. * Since we may not have done a quotacheck by this point, just read
  754. * the dquot without attaching it to any hashtables or lists.
  755. */
  756. error = xfs_qm_dqread(mp, 0,
  757. XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
  758. (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
  759. XFS_DQ_PROJ),
  760. XFS_QMOPT_DOWARN, &dqp);
  761. if (!error) {
  762. xfs_disk_dquot_t *ddqp = &dqp->q_core;
  763. /*
  764. * The warnings and timers set the grace period given to
  765. * a user or group before he or she can no longer write.
  766. * If it is zero, a default is used.
  767. */
  768. qinf->qi_btimelimit = ddqp->d_btimer ?
  769. be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
  770. qinf->qi_itimelimit = ddqp->d_itimer ?
  771. be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
  772. qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
  773. be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
  774. qinf->qi_bwarnlimit = ddqp->d_bwarns ?
  775. be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
  776. qinf->qi_iwarnlimit = ddqp->d_iwarns ?
  777. be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
  778. qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
  779. be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
  780. qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
  781. qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
  782. qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
  783. qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
  784. qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
  785. qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
  786. xfs_qm_dqdestroy(dqp);
  787. } else {
  788. qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
  789. qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
  790. qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
  791. qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
  792. qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
  793. qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
  794. }
  795. qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
  796. qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
  797. qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
  798. qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
  799. register_shrinker(&qinf->qi_shrinker);
  800. return 0;
  801. }
  802. /*
  803. * Gets called when unmounting a filesystem or when all quotas get
  804. * turned off.
  805. * This purges the quota inodes, destroys locks and frees itself.
  806. */
  807. void
  808. xfs_qm_destroy_quotainfo(
  809. xfs_mount_t *mp)
  810. {
  811. xfs_quotainfo_t *qi;
  812. qi = mp->m_quotainfo;
  813. ASSERT(qi != NULL);
  814. unregister_shrinker(&qi->qi_shrinker);
  815. if (qi->qi_uquotaip) {
  816. IRELE(qi->qi_uquotaip);
  817. qi->qi_uquotaip = NULL; /* paranoia */
  818. }
  819. if (qi->qi_gquotaip) {
  820. IRELE(qi->qi_gquotaip);
  821. qi->qi_gquotaip = NULL;
  822. }
  823. if (qi->qi_pquotaip) {
  824. IRELE(qi->qi_pquotaip);
  825. qi->qi_pquotaip = NULL;
  826. }
  827. mutex_destroy(&qi->qi_quotaofflock);
  828. kmem_free(qi);
  829. mp->m_quotainfo = NULL;
  830. }
  831. /*
  832. * Create an inode and return with a reference already taken, but unlocked.
  833. * This is how we create quota inodes.
  834. */
  835. STATIC int
  836. xfs_qm_qino_alloc(
  837. xfs_mount_t *mp,
  838. xfs_inode_t **ip,
  839. __int64_t sbfields,
  840. uint flags)
  841. {
  842. xfs_trans_t *tp;
  843. int error;
  844. int committed;
  845. *ip = NULL;
  846. /*
  847. * With superblock that doesn't have separate pquotino, we
  848. * share an inode between gquota and pquota. If the on-disk
  849. * superblock has GQUOTA and the filesystem is now mounted
  850. * with PQUOTA, just use sb_gquotino for sb_pquotino and
  851. * vice-versa.
  852. */
  853. if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
  854. (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
  855. xfs_ino_t ino = NULLFSINO;
  856. if ((flags & XFS_QMOPT_PQUOTA) &&
  857. (mp->m_sb.sb_gquotino != NULLFSINO)) {
  858. ino = mp->m_sb.sb_gquotino;
  859. ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
  860. } else if ((flags & XFS_QMOPT_GQUOTA) &&
  861. (mp->m_sb.sb_pquotino != NULLFSINO)) {
  862. ino = mp->m_sb.sb_pquotino;
  863. ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
  864. }
  865. if (ino != NULLFSINO) {
  866. error = xfs_iget(mp, NULL, ino, 0, 0, ip);
  867. if (error)
  868. return error;
  869. mp->m_sb.sb_gquotino = NULLFSINO;
  870. mp->m_sb.sb_pquotino = NULLFSINO;
  871. }
  872. }
  873. tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
  874. error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
  875. XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
  876. if (error) {
  877. xfs_trans_cancel(tp, 0);
  878. return error;
  879. }
  880. if (!*ip) {
  881. error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
  882. &committed);
  883. if (error) {
  884. xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
  885. XFS_TRANS_ABORT);
  886. return error;
  887. }
  888. }
  889. /*
  890. * Make the changes in the superblock, and log those too.
  891. * sbfields arg may contain fields other than *QUOTINO;
  892. * VERSIONNUM for example.
  893. */
  894. spin_lock(&mp->m_sb_lock);
  895. if (flags & XFS_QMOPT_SBVERSION) {
  896. ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
  897. ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
  898. XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
  899. (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
  900. XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
  901. XFS_SB_QFLAGS));
  902. xfs_sb_version_addquota(&mp->m_sb);
  903. mp->m_sb.sb_uquotino = NULLFSINO;
  904. mp->m_sb.sb_gquotino = NULLFSINO;
  905. mp->m_sb.sb_pquotino = NULLFSINO;
  906. /* qflags will get updated fully _after_ quotacheck */
  907. mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
  908. }
  909. if (flags & XFS_QMOPT_UQUOTA)
  910. mp->m_sb.sb_uquotino = (*ip)->i_ino;
  911. else if (flags & XFS_QMOPT_GQUOTA)
  912. mp->m_sb.sb_gquotino = (*ip)->i_ino;
  913. else
  914. mp->m_sb.sb_pquotino = (*ip)->i_ino;
  915. spin_unlock(&mp->m_sb_lock);
  916. xfs_mod_sb(tp, sbfields);
  917. if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
  918. xfs_alert(mp, "%s failed (error %d)!", __func__, error);
  919. return error;
  920. }
  921. return 0;
  922. }
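/*
 * Zero the counters, timers and warning counts of every dquot in the given
 * buffer so quotacheck can rebuild them from scratch, recomputing the CRC
 * of each dquot block on v5 filesystems.
 */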
  923. STATIC void
  924. xfs_qm_reset_dqcounts(
  925. xfs_mount_t *mp,
  926. xfs_buf_t *bp,
  927. xfs_dqid_t id,
  928. uint type)
  929. {
  930. struct xfs_dqblk *dqb;
  931. int j;
  932. trace_xfs_reset_dqcounts(bp, _RET_IP_);
  933. /*
  934. * Reset all counters and timers. They'll be
  935. * started afresh by xfs_qm_quotacheck.
  936. */
  937. #ifdef DEBUG
  938. j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
  939. do_div(j, sizeof(xfs_dqblk_t));
  940. ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
  941. #endif
  942. dqb = bp->b_addr;
  943. for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
  944. struct xfs_disk_dquot *ddq;
  945. ddq = (struct xfs_disk_dquot *)&dqb[j];
  946. /*
  947. * Do a sanity check, and if needed, repair the dqblk. Don't
  948. * output any warnings because it's perfectly possible to
  949. * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
  950. */
  951. (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
  952. "xfs_quotacheck");
  953. ddq->d_bcount = 0;
  954. ddq->d_icount = 0;
  955. ddq->d_rtbcount = 0;
  956. ddq->d_btimer = 0;
  957. ddq->d_itimer = 0;
  958. ddq->d_rtbtimer = 0;
  959. ddq->d_bwarns = 0;
  960. ddq->d_iwarns = 0;
  961. ddq->d_rtbwarns = 0;
  962. if (xfs_sb_version_hascrc(&mp->m_sb)) {
  963. xfs_update_cksum((char *)&dqb[j],
  964. sizeof(struct xfs_dqblk),
  965. XFS_DQUOT_CRC_OFF);
  966. }
  967. }
  968. }
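/*
 * Read each dquot buffer in the given block range, reset the counters of
 * every dquot it contains, and queue the buffer for delayed write. Used by
 * quotacheck via xfs_qm_dqiterate().
 */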
  969. STATIC int
  970. xfs_qm_dqiter_bufs(
  971. struct xfs_mount *mp,
  972. xfs_dqid_t firstid,
  973. xfs_fsblock_t bno,
  974. xfs_filblks_t blkcnt,
  975. uint flags,
  976. struct list_head *buffer_list)
  977. {
  978. struct xfs_buf *bp;
  979. int error;
  980. int type;
  981. ASSERT(blkcnt > 0);
  982. type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
  983. (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
  984. error = 0;
  985. /*
  986. * Blkcnt arg can be a very big number, and might even be
  987. * larger than the log itself. So, we have to break it up into
  988. * manageable-sized transactions.
  989. * Note that we don't start a permanent transaction here; we might
  990. * not be able to get a log reservation for the whole thing up front,
  991. * and we don't really care to either, because we just discard
  992. * everything if we were to crash in the middle of this loop.
  993. */
  994. while (blkcnt--) {
  995. error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
  996. XFS_FSB_TO_DADDR(mp, bno),
  997. mp->m_quotainfo->qi_dqchunklen, 0, &bp,
  998. &xfs_dquot_buf_ops);
  999. /*
  1000. * CRC and validation errors will return an EFSCORRUPTED here. If
  1001. * this occurs, re-read without CRC validation so that we can
  1002. * repair the damage via xfs_qm_reset_dqcounts(). This process
  1003. * will leave a trace in the log indicating corruption has
  1004. * been detected.
  1005. */
  1006. if (error == EFSCORRUPTED) {
  1007. error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
  1008. XFS_FSB_TO_DADDR(mp, bno),
  1009. mp->m_quotainfo->qi_dqchunklen, 0, &bp,
  1010. NULL);
  1011. }
  1012. if (error)
  1013. break;
  1014. xfs_qm_reset_dqcounts(mp, bp, firstid, type);
  1015. xfs_buf_delwri_queue(bp, buffer_list);
  1016. xfs_buf_relse(bp);
  1017. /* goto the next block. */
  1018. bno++;
  1019. firstid += mp->m_quotainfo->qi_dqperchunk;
  1020. }
  1021. return error;
  1022. }
  1023. /*
  1024. * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
  1025. * caller-supplied function for every chunk of dquots that we find.
  1026. */
  1027. STATIC int
  1028. xfs_qm_dqiterate(
  1029. struct xfs_mount *mp,
  1030. struct xfs_inode *qip,
  1031. uint flags,
  1032. struct list_head *buffer_list)
  1033. {
  1034. struct xfs_bmbt_irec *map;
  1035. int i, nmaps; /* number of map entries */
  1036. int error; /* return value */
  1037. xfs_fileoff_t lblkno;
  1038. xfs_filblks_t maxlblkcnt;
  1039. xfs_dqid_t firstid;
  1040. xfs_fsblock_t rablkno;
  1041. xfs_filblks_t rablkcnt;
  1042. error = 0;
  1043. /*
  1044. * This looks racy, but we can't keep an inode lock across a
  1045. * trans_reserve. But, this gets called during quotacheck, and that
  1046. * happens only at mount time, which is single-threaded.
  1047. */
  1048. if (qip->i_d.di_nblocks == 0)
  1049. return 0;
  1050. map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
  1051. lblkno = 0;
  1052. maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
  1053. do {
  1054. nmaps = XFS_DQITER_MAP_SIZE;
  1055. /*
  1056. * We aren't changing the inode itself. Just changing
  1057. * some of its data. No new blocks are added here, and
  1058. * the inode is never added to the transaction.
  1059. */
  1060. xfs_ilock(qip, XFS_ILOCK_SHARED);
  1061. error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
  1062. map, &nmaps, 0);
  1063. xfs_iunlock(qip, XFS_ILOCK_SHARED);
  1064. if (error)
  1065. break;
  1066. ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
  1067. for (i = 0; i < nmaps; i++) {
  1068. ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
  1069. ASSERT(map[i].br_blockcount);
  1070. lblkno += map[i].br_blockcount;
  1071. if (map[i].br_startblock == HOLESTARTBLOCK)
  1072. continue;
  1073. firstid = (xfs_dqid_t) map[i].br_startoff *
  1074. mp->m_quotainfo->qi_dqperchunk;
  1075. /*
  1076. * Do a read-ahead on the next extent.
  1077. */
  1078. if ((i+1 < nmaps) &&
  1079. (map[i+1].br_startblock != HOLESTARTBLOCK)) {
  1080. rablkcnt = map[i+1].br_blockcount;
  1081. rablkno = map[i+1].br_startblock;
  1082. while (rablkcnt--) {
  1083. xfs_buf_readahead(mp->m_ddev_targp,
  1084. XFS_FSB_TO_DADDR(mp, rablkno),
  1085. mp->m_quotainfo->qi_dqchunklen,
  1086. NULL);
  1087. rablkno++;
  1088. }
  1089. }
  1090. /*
  1091. * Iterate through all the blocks in the extent and
  1092. * reset the counters of all the dquots inside them.
  1093. */
  1094. error = xfs_qm_dqiter_bufs(mp, firstid,
  1095. map[i].br_startblock,
  1096. map[i].br_blockcount,
  1097. flags, buffer_list);
  1098. if (error)
  1099. goto out;
  1100. }
  1101. } while (nmaps > 0);
  1102. out:
  1103. kmem_free(map);
  1104. return error;
  1105. }
  1106. /*
  1107. * Called by dqusage_adjust in doing a quotacheck.
  1108. *
  1109. * Given the inode and a dquot id, this updates both the incore dquot as well
  1110. * as the buffer copy. This is so that once the quotacheck is done, we can
  1111. * just log all the buffers, as opposed to logging numerous updates to
  1112. * individual dquots.
  1113. */
  1114. STATIC int
  1115. xfs_qm_quotacheck_dqadjust(
  1116. struct xfs_inode *ip,
  1117. xfs_dqid_t id,
  1118. uint type,
  1119. xfs_qcnt_t nblks,
  1120. xfs_qcnt_t rtblks)
  1121. {
  1122. struct xfs_mount *mp = ip->i_mount;
  1123. struct xfs_dquot *dqp;
  1124. int error;
  1125. error = xfs_qm_dqget(mp, ip, id, type,
  1126. XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
  1127. if (error) {
  1128. /*
  1129. * Shouldn't be able to turn off quotas here.
  1130. */
  1131. ASSERT(error != ESRCH);
  1132. ASSERT(error != ENOENT);
  1133. return error;
  1134. }
  1135. trace_xfs_dqadjust(dqp);
  1136. /*
  1137. * Adjust the inode count and the block count to reflect this inode's
  1138. * resource usage.
  1139. */
  1140. be64_add_cpu(&dqp->q_core.d_icount, 1);
  1141. dqp->q_res_icount++;
  1142. if (nblks) {
  1143. be64_add_cpu(&dqp->q_core.d_bcount, nblks);
  1144. dqp->q_res_bcount += nblks;
  1145. }
  1146. if (rtblks) {
  1147. be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
  1148. dqp->q_res_rtbcount += rtblks;
  1149. }
  1150. /*
  1151. * Set default limits, adjust timers (since we changed usages)
  1152. *
  1153. * There are no timers for the default values set in the root dquot.
  1154. */
  1155. if (dqp->q_core.d_id) {
  1156. xfs_qm_adjust_dqlimits(mp, dqp);
  1157. xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
  1158. }
  1159. dqp->dq_flags |= XFS_DQ_DIRTY;
  1160. xfs_qm_dqput(dqp);
  1161. return 0;
  1162. }
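/*
 * Count the realtime blocks allocated to an inode by summing the block
 * counts of all extents in its data fork.
 */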
  1163. STATIC int
  1164. xfs_qm_get_rtblks(
  1165. xfs_inode_t *ip,
  1166. xfs_qcnt_t *O_rtblks)
  1167. {
  1168. xfs_filblks_t rtblks; /* total rt blks */
  1169. xfs_extnum_t idx; /* extent record index */
  1170. xfs_ifork_t *ifp; /* inode fork pointer */
  1171. xfs_extnum_t nextents; /* number of extent entries */
  1172. int error;
  1173. ASSERT(XFS_IS_REALTIME_INODE(ip));
  1174. ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
  1175. if (!(ifp->if_flags & XFS_IFEXTENTS)) {
  1176. if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
  1177. return error;
  1178. }
  1179. rtblks = 0;
  1180. nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
  1181. for (idx = 0; idx < nextents; idx++)
  1182. rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
  1183. *O_rtblks = (xfs_qcnt_t)rtblks;
  1184. return 0;
  1185. }
  1186. /*
  1187. * Callback routine supplied to bulkstat(). Given an inumber, find its
  1188. * dquots and update them to account for resources taken by that inode.
  1189. */
  1190. /* ARGSUSED */
  1191. STATIC int
  1192. xfs_qm_dqusage_adjust(
  1193. xfs_mount_t *mp, /* mount point for filesystem */
  1194. xfs_ino_t ino, /* inode number to get data for */
  1195. void __user *buffer, /* not used */
  1196. int ubsize, /* not used */
  1197. int *ubused, /* not used */
  1198. int *res) /* result code value */
  1199. {
  1200. xfs_inode_t *ip;
  1201. xfs_qcnt_t nblks, rtblks = 0;
  1202. int error;
  1203. ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  1204. /*
  1205. * rootino must have its resources accounted for, not so with the quota
  1206. * inodes.
  1207. */
  1208. if (xfs_is_quota_inode(&mp->m_sb, ino)) {
  1209. *res = BULKSTAT_RV_NOTHING;
  1210. return XFS_ERROR(EINVAL);
  1211. }
  1212. /*
  1213. * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
  1214. * interface expects the inode to be exclusively locked because that's
  1215. * the case in all other instances. It's OK that we do this because
  1216. * quotacheck is done only at mount time.
  1217. */
  1218. error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
  1219. if (error) {
  1220. *res = BULKSTAT_RV_NOTHING;
  1221. return error;
  1222. }
  1223. ASSERT(ip->i_delayed_blks == 0);
  1224. if (XFS_IS_REALTIME_INODE(ip)) {
  1225. /*
  1226. * Walk through the extent list and count the realtime blocks.
  1227. */
  1228. error = xfs_qm_get_rtblks(ip, &rtblks);
  1229. if (error)
  1230. goto error0;
  1231. }
  1232. nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
  1233. /*
  1234. * Add the (disk blocks and inode) resources occupied by this
  1235. * inode to its dquots. We do this adjustment in the incore dquot,
  1236. * and also copy the changes to its buffer.
  1237. * We don't care about putting these changes in a transaction
  1238. * envelope because if we crash in the middle of a 'quotacheck'
  1239. * we have to start from the beginning anyway.
  1240. * Once we're done, we'll log all the dquot bufs.
  1241. *
  1242. * The *QUOTA_ON checks below may look pretty racy, but quotachecks
  1243. * and quotaoffs don't race. (Quotachecks happen at mount time only).
  1244. */
  1245. if (XFS_IS_UQUOTA_ON(mp)) {
  1246. error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
  1247. XFS_DQ_USER, nblks, rtblks);
  1248. if (error)
  1249. goto error0;
  1250. }
  1251. if (XFS_IS_GQUOTA_ON(mp)) {
  1252. error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
  1253. XFS_DQ_GROUP, nblks, rtblks);
  1254. if (error)
  1255. goto error0;
  1256. }
  1257. if (XFS_IS_PQUOTA_ON(mp)) {
  1258. error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
  1259. XFS_DQ_PROJ, nblks, rtblks);
  1260. if (error)
  1261. goto error0;
  1262. }
  1263. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1264. IRELE(ip);
  1265. *res = BULKSTAT_RV_DIDONE;
  1266. return 0;
  1267. error0:
  1268. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1269. IRELE(ip);
  1270. *res = BULKSTAT_RV_GIVEUP;
  1271. return error;
  1272. }
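/*
 * Flush one dirty dquot to its backing buffer and queue that buffer for
 * delayed write; called for every dquot via xfs_qm_dquot_walk() at the end
 * of quotacheck.
 */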
  1273. STATIC int
  1274. xfs_qm_flush_one(
  1275. struct xfs_dquot *dqp,
  1276. void *data)
  1277. {
  1278. struct list_head *buffer_list = data;
  1279. struct xfs_buf *bp = NULL;
  1280. int error = 0;
  1281. xfs_dqlock(dqp);
  1282. if (dqp->dq_flags & XFS_DQ_FREEING)
  1283. goto out_unlock;
  1284. if (!XFS_DQ_IS_DIRTY(dqp))
  1285. goto out_unlock;
  1286. xfs_dqflock(dqp);
  1287. error = xfs_qm_dqflush(dqp, &bp);
  1288. if (error)
  1289. goto out_unlock;
  1290. xfs_buf_delwri_queue(bp, buffer_list);
  1291. xfs_buf_relse(bp);
  1292. out_unlock:
  1293. xfs_dqunlock(dqp);
  1294. return error;
  1295. }
  1296. /*
  1297. * Walk through all the filesystem inodes and construct a consistent view
  1298. * of the disk quota world. If the quotacheck fails, disable quotas.
  1299. */
  1300. int
  1301. xfs_qm_quotacheck(
  1302. xfs_mount_t *mp)
  1303. {
  1304. int done, count, error, error2;
  1305. xfs_ino_t lastino;
  1306. size_t structsz;
  1307. uint flags;
  1308. LIST_HEAD (buffer_list);
  1309. struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
  1310. struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
  1311. struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
  1312. count = INT_MAX;
  1313. structsz = 1;
  1314. lastino = 0;
  1315. flags = 0;
  1316. ASSERT(uip || gip || pip);
  1317. ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  1318. xfs_notice(mp, "Quotacheck needed: Please wait.");
  1319. /*
  1320. * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
  1321. * their counters to zero. We need a clean slate.
  1322. * We don't log our changes till later.
  1323. */
  1324. if (uip) {
  1325. error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
  1326. &buffer_list);
  1327. if (error)
  1328. goto error_return;
  1329. flags |= XFS_UQUOTA_CHKD;
  1330. }
  1331. if (gip) {
  1332. error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
  1333. &buffer_list);
  1334. if (error)
  1335. goto error_return;
  1336. flags |= XFS_GQUOTA_CHKD;
  1337. }
  1338. if (pip) {
  1339. error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
  1340. &buffer_list);
  1341. if (error)
  1342. goto error_return;
  1343. flags |= XFS_PQUOTA_CHKD;
  1344. }
  1345. do {
  1346. /*
  1347. * Iterate through all the inodes in the file system,
  1348. * adjusting the corresponding dquot counters in core.
  1349. */
  1350. error = xfs_bulkstat(mp, &lastino, &count,
  1351. xfs_qm_dqusage_adjust,
  1352. structsz, NULL, &done);
  1353. if (error)
  1354. break;
  1355. } while (!done);
  1356. /*
  1357. * We've made all the changes that we need to make incore. Flush them
  1358. * down to disk buffers if everything was updated successfully.
  1359. */
  1360. if (XFS_IS_UQUOTA_ON(mp)) {
  1361. error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
  1362. &buffer_list);
  1363. }
  1364. if (XFS_IS_GQUOTA_ON(mp)) {
  1365. error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
  1366. &buffer_list);
  1367. if (!error)
  1368. error = error2;
  1369. }
  1370. if (XFS_IS_PQUOTA_ON(mp)) {
  1371. error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
  1372. &buffer_list);
  1373. if (!error)
  1374. error = error2;
  1375. }
  1376. error2 = xfs_buf_delwri_submit(&buffer_list);
  1377. if (!error)
  1378. error = error2;
  1379. /*
  1380. * We can get this error if we couldn't do a dquot allocation inside
  1381. * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
  1382. * dirty dquots that might be cached, we just want to get rid of them
  1383. * and turn quotaoff. The dquots won't be attached to any of the inodes
  1384. * at this point (because we intentionally didn't in dqget_noattach).
  1385. */
  1386. if (error) {
  1387. xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
  1388. goto error_return;
  1389. }
  1390. /*
  1391. * If one type of quotas is off, then it will lose its
  1392. * quotachecked status, since we won't be doing accounting for
  1393. * that type anymore.
  1394. */
  1395. mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
  1396. mp->m_qflags |= flags;
  1397. error_return:
  1398. while (!list_empty(&buffer_list)) {
  1399. struct xfs_buf *bp =
  1400. list_first_entry(&buffer_list, struct xfs_buf, b_list);
  1401. list_del_init(&bp->b_list);
  1402. xfs_buf_relse(bp);
  1403. }
  1404. if (error) {
  1405. xfs_warn(mp,
  1406. "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
  1407. error);
  1408. /*
  1409. * We must turn off quotas.
  1410. */
  1411. ASSERT(mp->m_quotainfo != NULL);
  1412. xfs_qm_destroy_quotainfo(mp);
  1413. if (xfs_mount_reset_sbqflags(mp)) {
  1414. xfs_warn(mp,
  1415. "Quotacheck: Failed to reset quota flags.");
  1416. }
  1417. } else
  1418. xfs_notice(mp, "Quotacheck: Done.");
  1419. return (error);
  1420. }

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	__int64_t		sbflags = 0;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota, gquota and pquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return XFS_ERROR(error);
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
			    XFS_SB_QFLAGS);
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					  sbflags | XFS_SB_UQUOTINO,
					  flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  sbflags | XFS_SB_PQUOTINO,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		IRELE(uip);
	if (gip)
		IRELE(gip);
	if (pip)
		IRELE(pip);
	return XFS_ERROR(error);
}
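
/*
 * Remove a dquot from the quota manager's bookkeeping and free it.  The
 * caller (the dquot purge and cache shrinker paths) is expected to hold the
 * last reference and to have already taken the dquot off the LRU, so all
 * that is left to do here is to delete it from the per-type radix tree,
 * update the dquot count and destroy the in-core structure.
 */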
STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/*
 * Start a transaction and write the incore superblock changes to disk.
 * The flags parameter indicates which superblock fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	error = xfs_trans_commit(tp, 0);

	return error;
}
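
/*
 * Example (illustrative): mount-time quota setup uses this helper to push
 * updated quota flags out to the on-disk superblock, along the lines of
 *
 *	error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
 *
 * where XFS_SB_QFLAGS selects the sb_qflags field for logging.
 */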

/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid, make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid, gid and/or prid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot, pdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	xfs_dqid_t		uid,
	xfs_dqid_t		gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq);
			if (error) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to the caller.
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq);
			if (error) {
				ASSERT(error != ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &pq);
			if (error) {
				ASSERT(error != ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else if (pq)
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	if (gq)
		xfs_qm_dqrele(gq);
	if (uq)
		xfs_qm_dqrele(uq);
	return error;
}
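
/*
 * Illustrative sketch of a typical create-path caller: the dquots are set up
 * before the transaction starts, attached to the new inode once it has been
 * allocated, and the caller's own references are dropped at the end.  Error
 * handling and the surrounding transaction setup are omitted here.
 *
 *	struct xfs_dquot	*udqp = NULL, *gdqp = NULL, *pdqp = NULL;
 *
 *	error = xfs_qm_vop_dqalloc(dp, uid, gid, prid,
 *				   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *				   &udqp, &gdqp, &pdqp);
 *	...
 *	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
 *	...
 *	xfs_qm_dqrele(udqp);
 *	xfs_qm_dqrele(gdqp);
 *	xfs_qm_dqrele(pdqp);
 */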

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}
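
/*
 * Note: the dquot returned by xfs_qm_vop_chown() is the inode's previous
 * dquot, with its reference still held; callers typically release it with
 * xfs_qm_dqrele() once the owning transaction has been committed.
 */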

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			delblks, blkflags, prjflags = 0;
	struct xfs_dquot	*udq_unres = NULL;
	struct xfs_dquot	*gdq_unres = NULL;
	struct xfs_dquot	*pdq_unres = NULL;
	struct xfs_dquot	*udq_delblks = NULL;
	struct xfs_dquot	*gdq_delblks = NULL;
	struct xfs_dquot	*pdq_delblks = NULL;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);
			gdq_unres = ip->i_gdquot;
		}
	}
	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
		prjflags = XFS_QMOPT_ENOSPC;
		pdq_delblks = pdqp;
		if (delblks) {
			ASSERT(ip->i_pdquot);
			pdq_unres = ip->i_pdquot;
		}
	}

	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, pdq_delblks,
				ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags);
	if (error)
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by the
	 * transaction code, so we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
		ASSERT(udq_unres || gdq_unres || pdq_unres);
		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
			    udq_delblks, gdq_delblks, pdq_delblks,
			    (xfs_qcnt_t)delblks, 0,
			    flags | blkflags | prjflags);
		if (error)
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, pdq_unres,
				-((xfs_qcnt_t)delblks), 0, blkflags);
	}

	return 0;
}
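
/*
 * Make sure dquots are attached to every distinct inode involved in a rename
 * (up to four entries in i_tab, possibly with duplicates) before the rename
 * transaction starts modifying them.
 */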
int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}
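
/*
 * Attach to a newly created inode the dquots that the caller set up with
 * xfs_qm_vop_dqalloc(), and charge the new inode against them in the given
 * transaction.  The caller is expected to have already reserved quota for
 * the create against these dquots.
 */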
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(XFS_IS_UQUOTA_ON(mp));
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(XFS_IS_GQUOTA_ON(mp));
		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(XFS_IS_PQUOTA_ON(mp));
		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}