xfs_qm.c

  1. /*
  2. * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "xfs.h"
  19. #include "xfs_fs.h"
  20. #include "xfs_bit.h"
  21. #include "xfs_log.h"
  22. #include "xfs_inum.h"
  23. #include "xfs_trans.h"
  24. #include "xfs_sb.h"
  25. #include "xfs_ag.h"
  26. #include "xfs_alloc.h"
  27. #include "xfs_quota.h"
  28. #include "xfs_mount.h"
  29. #include "xfs_bmap_btree.h"
  30. #include "xfs_ialloc_btree.h"
  31. #include "xfs_dinode.h"
  32. #include "xfs_inode.h"
  33. #include "xfs_ialloc.h"
  34. #include "xfs_itable.h"
  35. #include "xfs_rtalloc.h"
  36. #include "xfs_error.h"
  37. #include "xfs_bmap.h"
  38. #include "xfs_attr.h"
  39. #include "xfs_buf_item.h"
  40. #include "xfs_trans_space.h"
  41. #include "xfs_utils.h"
  42. #include "xfs_qm.h"
  43. #include "xfs_trace.h"
  44. /*
  45. * The global quota manager. There is only one of these for the entire
  46. * system, _not_ one per file system. XQM keeps track of the overall
  47. * quota functionality, including maintaining the freelist and hash
  48. * tables of dquots.
  49. */
  50. struct mutex xfs_Gqm_lock;
  51. struct xfs_qm *xfs_Gqm;
  52. uint ndquot;
  53. kmem_zone_t *qm_dqzone;
  54. kmem_zone_t *qm_dqtrxzone;
  55. STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int);
  56. STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
  57. STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
  58. STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
  59. STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *);
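/*
 * Memory shrinker registered with the VM: xfs_qm_shake() trims the dquot
 * cache when the system is under memory pressure.
 */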
  60. static struct shrinker xfs_qm_shaker = {
  61. .shrink = xfs_qm_shake,
  62. .seeks = DEFAULT_SEEKS,
  63. };
  64. /*
  65. * Initialize the XQM structure.
  66. * Note that there is not one quota manager per file system.
  67. */
  68. STATIC struct xfs_qm *
  69. xfs_Gqm_init(void)
  70. {
  71. xfs_dqhash_t *udqhash, *gdqhash;
  72. xfs_qm_t *xqm;
  73. size_t hsize;
  74. uint i;
  75. /*
  76. * Initialize the dquot hash tables.
  77. */
  78. udqhash = kmem_zalloc_greedy(&hsize,
  79. XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
  80. XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t));
  81. if (!udqhash)
  82. goto out;
  83. gdqhash = kmem_zalloc_large(hsize);
  84. if (!gdqhash)
  85. goto out_free_udqhash;
  86. hsize /= sizeof(xfs_dqhash_t);
  87. ndquot = hsize << 8;
  88. xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
  89. xqm->qm_dqhashmask = hsize - 1;
  90. xqm->qm_usr_dqhtable = udqhash;
  91. xqm->qm_grp_dqhtable = gdqhash;
  92. ASSERT(xqm->qm_usr_dqhtable != NULL);
  93. ASSERT(xqm->qm_grp_dqhtable != NULL);
  94. for (i = 0; i < hsize; i++) {
  95. xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i);
  96. xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i);
  97. }
  98. /*
  99. * Freelist of all dquots of all file systems
  100. */
  101. INIT_LIST_HEAD(&xqm->qm_dqfrlist);
  102. xqm->qm_dqfrlist_cnt = 0;
  103. mutex_init(&xqm->qm_dqfrlist_lock);
  104. /*
  105. * dquot zone. we register our own low-memory callback.
  106. */
  107. if (!qm_dqzone) {
  108. xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t),
  109. "xfs_dquots");
  110. qm_dqzone = xqm->qm_dqzone;
  111. } else
  112. xqm->qm_dqzone = qm_dqzone;
  113. register_shrinker(&xfs_qm_shaker);
  114. /*
  115. * The t_dqinfo portion of transactions.
  116. */
  117. if (!qm_dqtrxzone) {
  118. xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t),
  119. "xfs_dqtrx");
  120. qm_dqtrxzone = xqm->qm_dqtrxzone;
  121. } else
  122. xqm->qm_dqtrxzone = qm_dqtrxzone;
  123. atomic_set(&xqm->qm_totaldquots, 0);
  124. xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
  125. xqm->qm_nrefs = 0;
  126. return xqm;
  127. out_free_udqhash:
  128. kmem_free_large(udqhash);
  129. out:
  130. return NULL;
  131. }
  132. /*
  133. * Destroy the global quota manager when its reference count goes to zero.
  134. */
  135. STATIC void
  136. xfs_qm_destroy(
  137. struct xfs_qm *xqm)
  138. {
  139. struct xfs_dquot *dqp, *n;
  140. int hsize, i;
  141. ASSERT(xqm != NULL);
  142. ASSERT(xqm->qm_nrefs == 0);
  143. unregister_shrinker(&xfs_qm_shaker);
  144. hsize = xqm->qm_dqhashmask + 1;
  145. for (i = 0; i < hsize; i++) {
  146. xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
  147. xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
  148. }
  149. kmem_free_large(xqm->qm_usr_dqhtable);
  150. kmem_free_large(xqm->qm_grp_dqhtable);
  151. xqm->qm_usr_dqhtable = NULL;
  152. xqm->qm_grp_dqhtable = NULL;
  153. xqm->qm_dqhashmask = 0;
  154. /* frlist cleanup */
  155. mutex_lock(&xqm->qm_dqfrlist_lock);
  156. list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) {
  157. xfs_dqlock(dqp);
  158. list_del_init(&dqp->q_freelist);
  159. xfs_Gqm->qm_dqfrlist_cnt--;
  160. xfs_dqunlock(dqp);
  161. xfs_qm_dqdestroy(dqp);
  162. }
  163. mutex_unlock(&xqm->qm_dqfrlist_lock);
  164. mutex_destroy(&xqm->qm_dqfrlist_lock);
  165. kmem_free(xqm);
  166. }
  167. /*
  168. * Called at mount time to let XQM know that another file system is
  169. * starting quotas. This isn't crucial information as the individual mount
  170. * structures are pretty independent, but it helps the XQM keep a
  171. * global view of what's going on.
  172. */
  173. /* ARGSUSED */
  174. STATIC int
  175. xfs_qm_hold_quotafs_ref(
  176. struct xfs_mount *mp)
  177. {
  178. /*
  179. * Need to lock the xfs_Gqm structure for things like this. For example,
  180. * the structure could disappear between the entry to this routine and
  181. * a HOLD operation if not locked.
  182. */
  183. mutex_lock(&xfs_Gqm_lock);
  184. if (!xfs_Gqm) {
  185. xfs_Gqm = xfs_Gqm_init();
  186. if (!xfs_Gqm) {
  187. mutex_unlock(&xfs_Gqm_lock);
  188. return ENOMEM;
  189. }
  190. }
  191. /*
  192. * We can keep a list of all filesystems with quotas mounted for
  193. * debugging and statistical purposes, but ...
  194. * Just take a reference and get out.
  195. */
  196. xfs_Gqm->qm_nrefs++;
  197. mutex_unlock(&xfs_Gqm_lock);
  198. return 0;
  199. }
  200. /*
  201. * Release the reference that a filesystem took at mount time,
  202. * so that we know when we need to destroy the entire quota manager.
  203. */
  204. /* ARGSUSED */
  205. STATIC void
  206. xfs_qm_rele_quotafs_ref(
  207. struct xfs_mount *mp)
  208. {
  209. xfs_dquot_t *dqp, *n;
  210. ASSERT(xfs_Gqm);
  211. ASSERT(xfs_Gqm->qm_nrefs > 0);
  212. /*
  213. * Go thru the freelist and destroy all inactive dquots.
  214. */
  215. mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
  216. list_for_each_entry_safe(dqp, n, &xfs_Gqm->qm_dqfrlist, q_freelist) {
  217. xfs_dqlock(dqp);
  218. if (dqp->dq_flags & XFS_DQ_INACTIVE) {
  219. ASSERT(dqp->q_mount == NULL);
  220. ASSERT(! XFS_DQ_IS_DIRTY(dqp));
  221. ASSERT(list_empty(&dqp->q_hashlist));
  222. ASSERT(list_empty(&dqp->q_mplist));
  223. list_del_init(&dqp->q_freelist);
  224. xfs_Gqm->qm_dqfrlist_cnt--;
  225. xfs_dqunlock(dqp);
  226. xfs_qm_dqdestroy(dqp);
  227. } else {
  228. xfs_dqunlock(dqp);
  229. }
  230. }
  231. mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
  232. /*
  233. * Destroy the entire XQM. If somebody mounts with quotaon, this'll
  234. * be restarted.
  235. */
  236. mutex_lock(&xfs_Gqm_lock);
  237. if (--xfs_Gqm->qm_nrefs == 0) {
  238. xfs_qm_destroy(xfs_Gqm);
  239. xfs_Gqm = NULL;
  240. }
  241. mutex_unlock(&xfs_Gqm_lock);
  242. }
  243. /*
  244. * Just destroy the quotainfo structure.
  245. */
  246. void
  247. xfs_qm_unmount(
  248. struct xfs_mount *mp)
  249. {
  250. if (mp->m_quotainfo) {
  251. xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
  252. xfs_qm_destroy_quotainfo(mp);
  253. }
  254. }
  255. /*
  256. * This is called from xfs_mountfs to start quotas and initialize all
  257. * necessary data structures like quotainfo. This is also responsible for
  258. * running a quotacheck as necessary. We are guaranteed that the superblock
  259. * is consistently read in at this point.
  260. *
  261. * If we fail here, the mount will continue with quota turned off. We don't
  262. * need to indicate success or failure at all.
  263. */
  264. void
  265. xfs_qm_mount_quotas(
  266. xfs_mount_t *mp)
  267. {
  268. int error = 0;
  269. uint sbf;
  270. /*
  271. * Quotas are not supported on realtime volumes, so if this
  272. * filesystem has a realtime section, disable quotas immediately.
  273. */
  274. if (mp->m_sb.sb_rextents) {
  275. xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
  276. mp->m_qflags = 0;
  277. goto write_changes;
  278. }
  279. ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  280. /*
  281. * Allocate the quotainfo structure inside the mount struct, and
  282. * create quotainode(s), and change/rev superblock if necessary.
  283. */
  284. error = xfs_qm_init_quotainfo(mp);
  285. if (error) {
  286. /*
  287. * We must turn off quotas.
  288. */
  289. ASSERT(mp->m_quotainfo == NULL);
  290. mp->m_qflags = 0;
  291. goto write_changes;
  292. }
  293. /*
  294. * If any of the quotas are not consistent, do a quotacheck.
  295. */
  296. if (XFS_QM_NEED_QUOTACHECK(mp)) {
  297. error = xfs_qm_quotacheck(mp);
  298. if (error) {
  299. /* Quotacheck failed and disabled quotas. */
  300. return;
  301. }
  302. }
  303. /*
  304. * If one type of quotas is off, then it will lose its
  305. * quotachecked status, since we won't be doing accounting for
  306. * that type anymore.
  307. */
  308. if (!XFS_IS_UQUOTA_ON(mp))
  309. mp->m_qflags &= ~XFS_UQUOTA_CHKD;
  310. if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
  311. mp->m_qflags &= ~XFS_OQUOTA_CHKD;
  312. write_changes:
  313. /*
  314. * We actually don't have to acquire the m_sb_lock at all.
  315. * This can only be called from mount, and that's single threaded. XXX
  316. */
  317. spin_lock(&mp->m_sb_lock);
  318. sbf = mp->m_sb.sb_qflags;
  319. mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
  320. spin_unlock(&mp->m_sb_lock);
  321. if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
  322. if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
  323. /*
  324. * We could only have been turning quotas off.
  325. * We aren't in very good shape actually because
  326. * the incore structures are convinced that quotas are
  327. * off, but the on-disk superblock doesn't know that!
  328. */
  329. ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
  330. xfs_alert(mp, "%s: Superblock update failed!",
  331. __func__);
  332. }
  333. }
  334. if (error) {
  335. xfs_warn(mp, "Failed to initialize disk quotas.");
  336. return;
  337. }
  338. }
  339. /*
  340. * Called from the vfsops layer.
  341. */
  342. void
  343. xfs_qm_unmount_quotas(
  344. xfs_mount_t *mp)
  345. {
  346. /*
  347. * Release the dquots that root inode, et al might be holding,
  348. * before we flush quotas and blow away the quotainfo structure.
  349. */
  350. ASSERT(mp->m_rootip);
  351. xfs_qm_dqdetach(mp->m_rootip);
  352. if (mp->m_rbmip)
  353. xfs_qm_dqdetach(mp->m_rbmip);
  354. if (mp->m_rsumip)
  355. xfs_qm_dqdetach(mp->m_rsumip);
  356. /*
  357. * Release the quota inodes.
  358. */
  359. if (mp->m_quotainfo) {
  360. if (mp->m_quotainfo->qi_uquotaip) {
  361. IRELE(mp->m_quotainfo->qi_uquotaip);
  362. mp->m_quotainfo->qi_uquotaip = NULL;
  363. }
  364. if (mp->m_quotainfo->qi_gquotaip) {
  365. IRELE(mp->m_quotainfo->qi_gquotaip);
  366. mp->m_quotainfo->qi_gquotaip = NULL;
  367. }
  368. }
  369. }
  370. /*
  371. * Flush all dquots of the given file system to disk. The dquots are
  372. * _not_ purged from memory here, just their data written to disk.
  373. */
  374. STATIC int
  375. xfs_qm_dqflush_all(
  376. struct xfs_mount *mp,
  377. int sync_mode)
  378. {
  379. struct xfs_quotainfo *q = mp->m_quotainfo;
  380. int recl;
  381. struct xfs_dquot *dqp;
  382. int error;
  383. if (!q)
  384. return 0;
  385. again:
  386. mutex_lock(&q->qi_dqlist_lock);
  387. list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
  388. xfs_dqlock(dqp);
  389. if (! XFS_DQ_IS_DIRTY(dqp)) {
  390. xfs_dqunlock(dqp);
  391. continue;
  392. }
  393. /* XXX a sentinel would be better */
  394. recl = q->qi_dqreclaims;
  395. if (!xfs_dqflock_nowait(dqp)) {
  396. /*
  397. * If we can't grab the flush lock then check
  398. * to see if the dquot has been flushed as a delayed
  399. * write. If so, grab its buffer and send it
  400. * out immediately. We'll be able to acquire
  401. * the flush lock when the I/O completes.
  402. */
  403. xfs_qm_dqflock_pushbuf_wait(dqp);
  404. }
  405. /*
  406. * Let go of the mplist lock. We don't want to hold it
  407. * across a disk write.
  408. */
  409. mutex_unlock(&q->qi_dqlist_lock);
  410. error = xfs_qm_dqflush(dqp, sync_mode);
  411. xfs_dqunlock(dqp);
  412. if (error)
  413. return error;
  414. mutex_lock(&q->qi_dqlist_lock);
  415. if (recl != q->qi_dqreclaims) {
  416. mutex_unlock(&q->qi_dqlist_lock);
  417. /* XXX restart limit */
  418. goto again;
  419. }
  420. }
  421. mutex_unlock(&q->qi_dqlist_lock);
  422. /* return ! busy */
  423. return 0;
  424. }
  425. /*
  426. * Release the group dquot pointers the user dquots may be
  427. * carrying around as a hint. mplist is locked on entry and exit.
  428. */
  429. STATIC void
  430. xfs_qm_detach_gdquots(
  431. struct xfs_mount *mp)
  432. {
  433. struct xfs_quotainfo *q = mp->m_quotainfo;
  434. struct xfs_dquot *dqp, *gdqp;
  435. int nrecl;
  436. again:
  437. ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
  438. list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
  439. xfs_dqlock(dqp);
  440. if ((gdqp = dqp->q_gdquot)) {
  441. xfs_dqlock(gdqp);
  442. dqp->q_gdquot = NULL;
  443. }
  444. xfs_dqunlock(dqp);
  445. if (gdqp) {
  446. /*
  447. * Can't hold the mplist lock across a dqput.
  448. * XXXmust convert to marker based iterations here.
  449. */
  450. nrecl = q->qi_dqreclaims;
  451. mutex_unlock(&q->qi_dqlist_lock);
  452. xfs_qm_dqput(gdqp);
  453. mutex_lock(&q->qi_dqlist_lock);
  454. if (nrecl != q->qi_dqreclaims)
  455. goto again;
  456. }
  457. }
  458. }
  459. /*
  460. * Go through all the incore dquots of this file system and take them
  461. * off the mplist and hashlist, if the dquot type matches the dqtype
  462. * parameter. This is used when turning off quota accounting for
  463. * users and/or groups, as well as when the filesystem is unmounting.
  464. */
  465. STATIC int
  466. xfs_qm_dqpurge_int(
  467. struct xfs_mount *mp,
  468. uint flags)
  469. {
  470. struct xfs_quotainfo *q = mp->m_quotainfo;
  471. struct xfs_dquot *dqp, *n;
  472. uint dqtype;
  473. int nrecl;
  474. int nmisses;
  475. if (!q)
  476. return 0;
  477. dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
  478. dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
  479. dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;
  480. mutex_lock(&q->qi_dqlist_lock);
  481. /*
  482. * In the first pass through all incore dquots of this filesystem,
  483. * we release the group dquot pointers the user dquots may be
  484. * carrying around as a hint. We need to do this irrespective of
  485. * what's being turned off.
  486. */
  487. xfs_qm_detach_gdquots(mp);
  488. again:
  489. nmisses = 0;
  490. ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
  491. /*
  492. * Try to get rid of all of the unwanted dquots. The idea is to
  493. * get them off mplist and hashlist, but leave them on freelist.
  494. */
  495. list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) {
  496. /*
  497. * It's OK to look at the type without taking dqlock here.
  498. * We're holding the mplist lock here, and that's needed for
  499. * a dqreclaim.
  500. */
  501. if ((dqp->dq_flags & dqtype) == 0)
  502. continue;
  503. if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
  504. nrecl = q->qi_dqreclaims;
  505. mutex_unlock(&q->qi_dqlist_lock);
  506. mutex_lock(&dqp->q_hash->qh_lock);
  507. mutex_lock(&q->qi_dqlist_lock);
  508. /*
  509. * XXXTheoretically, we can get into a very long
  510. * ping pong game here.
  511. * No one can be adding dquots to the mplist at
  512. * this point, but somebody might be taking things off.
  513. */
  514. if (nrecl != q->qi_dqreclaims) {
  515. mutex_unlock(&dqp->q_hash->qh_lock);
  516. goto again;
  517. }
  518. }
  519. /*
  520. * Take the dquot off the mplist and hashlist. It may remain on
  521. * freelist in INACTIVE state.
  522. */
  523. nmisses += xfs_qm_dqpurge(dqp);
  524. }
  525. mutex_unlock(&q->qi_dqlist_lock);
  526. return nmisses;
  527. }
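/*
 * Purge all cached dquots of the quota type(s) selected by @flags from
 * this mount. xfs_qm_dqpurge_int() returns the number of dquots it could
 * not get rid of on a pass, so keep retrying (after a short delay) until
 * nothing is left.
 */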
  528. int
  529. xfs_qm_dqpurge_all(
  530. xfs_mount_t *mp,
  531. uint flags)
  532. {
  533. int ndquots;
  534. /*
  535. * Purge the dquot cache.
  536. * None of the dquots should really be busy at this point.
  537. */
  538. if (mp->m_quotainfo) {
  539. while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) {
  540. delay(ndquots * 10);
  541. }
  542. }
  543. return 0;
  544. }
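/*
 * Attach one dquot of the given type to the inode, storing it in *IO_idqpp
 * (&i_udquot or &i_gdquot). If a user dquot hint is supplied and it already
 * carries the matching group/project dquot, reuse that and avoid a full
 * xfs_qm_dqget() lookup. The inode must be locked ILOCK_EXCL on entry.
 */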
  545. STATIC int
  546. xfs_qm_dqattach_one(
  547. xfs_inode_t *ip,
  548. xfs_dqid_t id,
  549. uint type,
  550. uint doalloc,
  551. xfs_dquot_t *udqhint, /* hint */
  552. xfs_dquot_t **IO_idqpp)
  553. {
  554. xfs_dquot_t *dqp;
  555. int error;
  556. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  557. error = 0;
  558. /*
  559. * See if we already have it in the inode itself. IO_idqpp is
  560. * &i_udquot or &i_gdquot. This made the code look weird, but
  561. * made the logic a lot simpler.
  562. */
  563. dqp = *IO_idqpp;
  564. if (dqp) {
  565. trace_xfs_dqattach_found(dqp);
  566. return 0;
  567. }
  568. /*
  569. * udqhint is the i_udquot field in inode, and is non-NULL only
  570. * when the type arg is group/project. Its purpose is to save a
  571. * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
  572. * the user dquot.
  573. */
  574. if (udqhint) {
  575. ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
  576. xfs_dqlock(udqhint);
  577. /*
  578. * No need to take dqlock to look at the id.
  579. *
  580. * The ID can't change until it gets reclaimed, and it won't
  581. * be reclaimed as long as we have a ref from inode and we
  582. * hold the ilock.
  583. */
  584. dqp = udqhint->q_gdquot;
  585. if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
  586. xfs_dqlock(dqp);
  587. XFS_DQHOLD(dqp);
  588. ASSERT(*IO_idqpp == NULL);
  589. *IO_idqpp = dqp;
  590. xfs_dqunlock(dqp);
  591. xfs_dqunlock(udqhint);
  592. return 0;
  593. }
  594. /*
  595. * We can't hold a dquot lock when we call the dqget code.
  596. * We'll deadlock in no time, because of (not conforming to)
  597. * lock ordering - the inodelock comes before any dquot lock,
  598. * and we may drop and reacquire the ilock in xfs_qm_dqget().
  599. */
  600. xfs_dqunlock(udqhint);
  601. }
  602. /*
  603. * Find the dquot from somewhere. This bumps the
  604. * reference count of dquot and returns it locked.
  605. * This can return ENOENT if dquot didn't exist on
  606. * disk and we didn't ask it to allocate;
  607. * ESRCH if quotas got turned off suddenly.
  608. */
  609. error = xfs_qm_dqget(ip->i_mount, ip, id, type,
  610. doalloc | XFS_QMOPT_DOWARN, &dqp);
  611. if (error)
  612. return error;
  613. trace_xfs_dqattach_get(dqp);
  614. /*
  615. * dqget may have dropped and re-acquired the ilock, but it guarantees
  616. * that the dquot returned is the one that should go in the inode.
  617. */
  618. *IO_idqpp = dqp;
  619. xfs_dqunlock(dqp);
  620. return 0;
  621. }
  622. /*
  623. * Given a udquot and gdquot, attach a ptr to the group dquot in the
  624. * udquot as a hint for future lookups. The idea sounds simple, but the
  625. * execution isn't, because the udquot might have a group dquot attached
  626. * already and getting rid of that gets us into lock ordering constraints.
  627. * The process is complicated more by the fact that the dquots may or may not
  628. * be locked on entry.
  629. */
  630. STATIC void
  631. xfs_qm_dqattach_grouphint(
  632. xfs_dquot_t *udq,
  633. xfs_dquot_t *gdq)
  634. {
  635. xfs_dquot_t *tmp;
  636. xfs_dqlock(udq);
  637. if ((tmp = udq->q_gdquot)) {
  638. if (tmp == gdq) {
  639. xfs_dqunlock(udq);
  640. return;
  641. }
  642. udq->q_gdquot = NULL;
  643. /*
  644. * We can't keep any dqlocks when calling dqrele,
  645. * because the freelist lock comes before dqlocks.
  646. */
  647. xfs_dqunlock(udq);
  648. /*
  649. * we took a hard reference once upon a time in dqget,
  650. * so give it back when the udquot no longer points at it.
  651. * dqput() does the unlocking of the dquot.
  652. */
  653. xfs_qm_dqrele(tmp);
  654. xfs_dqlock(udq);
  655. xfs_dqlock(gdq);
  656. } else {
  657. ASSERT(XFS_DQ_IS_LOCKED(udq));
  658. xfs_dqlock(gdq);
  659. }
  660. ASSERT(XFS_DQ_IS_LOCKED(udq));
  661. ASSERT(XFS_DQ_IS_LOCKED(gdq));
  662. /*
  663. * Somebody could have attached a gdquot here,
  664. * when we dropped the uqlock. If so, just do nothing.
  665. */
  666. if (udq->q_gdquot == NULL) {
  667. XFS_DQHOLD(gdq);
  668. udq->q_gdquot = gdq;
  669. }
  670. xfs_dqunlock(gdq);
  671. xfs_dqunlock(udq);
  672. }
  673. /*
  674. * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
  675. * into account.
  676. * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
  677. * Inode may get unlocked and relocked in here, and the caller must deal with
  678. * the consequences.
  679. */
  680. int
  681. xfs_qm_dqattach_locked(
  682. xfs_inode_t *ip,
  683. uint flags)
  684. {
  685. xfs_mount_t *mp = ip->i_mount;
  686. uint nquotas = 0;
  687. int error = 0;
  688. if (!XFS_IS_QUOTA_RUNNING(mp) ||
  689. !XFS_IS_QUOTA_ON(mp) ||
  690. !XFS_NOT_DQATTACHED(mp, ip) ||
  691. ip->i_ino == mp->m_sb.sb_uquotino ||
  692. ip->i_ino == mp->m_sb.sb_gquotino)
  693. return 0;
  694. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  695. if (XFS_IS_UQUOTA_ON(mp)) {
  696. error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
  697. flags & XFS_QMOPT_DQALLOC,
  698. NULL, &ip->i_udquot);
  699. if (error)
  700. goto done;
  701. nquotas++;
  702. }
  703. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  704. if (XFS_IS_OQUOTA_ON(mp)) {
  705. error = XFS_IS_GQUOTA_ON(mp) ?
  706. xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
  707. flags & XFS_QMOPT_DQALLOC,
  708. ip->i_udquot, &ip->i_gdquot) :
  709. xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
  710. flags & XFS_QMOPT_DQALLOC,
  711. ip->i_udquot, &ip->i_gdquot);
  712. /*
  713. * Don't worry about the udquot that we may have
  714. * attached above. It'll get detached, if not already.
  715. */
  716. if (error)
  717. goto done;
  718. nquotas++;
  719. }
  720. /*
  721. * Attach this group quota to the user quota as a hint.
  722. * This WON'T, in general, result in a thrash.
  723. */
  724. if (nquotas == 2) {
  725. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  726. ASSERT(ip->i_udquot);
  727. ASSERT(ip->i_gdquot);
  728. /*
  729. * We may or may not have the i_udquot locked at this point,
  730. * but this check is OK since we don't depend on the i_gdquot to
  731. * be accurate 100% all the time. It is just a hint, and this
  732. * will succeed in general.
  733. */
  734. if (ip->i_udquot->q_gdquot == ip->i_gdquot)
  735. goto done;
  736. /*
  737. * Attach i_gdquot to the gdquot hint inside the i_udquot.
  738. */
  739. xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
  740. }
  741. done:
  742. #ifdef DEBUG
  743. if (!error) {
  744. if (XFS_IS_UQUOTA_ON(mp))
  745. ASSERT(ip->i_udquot);
  746. if (XFS_IS_OQUOTA_ON(mp))
  747. ASSERT(ip->i_gdquot);
  748. }
  749. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  750. #endif
  751. return error;
  752. }
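/*
 * Convenience wrapper around xfs_qm_dqattach_locked() that takes and drops
 * the inode's ILOCK_EXCL around the attach.
 */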
  753. int
  754. xfs_qm_dqattach(
  755. struct xfs_inode *ip,
  756. uint flags)
  757. {
  758. int error;
  759. xfs_ilock(ip, XFS_ILOCK_EXCL);
  760. error = xfs_qm_dqattach_locked(ip, flags);
  761. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  762. return error;
  763. }
  764. /*
  765. * Release dquots (and their references) if any.
  766. * The inode should be locked EXCL except when this is called by
  767. * xfs_ireclaim.
  768. */
  769. void
  770. xfs_qm_dqdetach(
  771. xfs_inode_t *ip)
  772. {
  773. if (!(ip->i_udquot || ip->i_gdquot))
  774. return;
  775. trace_xfs_dquot_dqdetach(ip);
  776. ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
  777. ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
  778. if (ip->i_udquot) {
  779. xfs_qm_dqrele(ip->i_udquot);
  780. ip->i_udquot = NULL;
  781. }
  782. if (ip->i_gdquot) {
  783. xfs_qm_dqrele(ip->i_gdquot);
  784. ip->i_gdquot = NULL;
  785. }
  786. }
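/*
 * Write back the dirty dquots of this filesystem. With SYNC_TRYLOCK we skip
 * any dquot whose locks cannot be taken without blocking; otherwise we may
 * push on the dquot buffer while waiting for the flush lock. The scan is
 * restarted (up to XFS_QM_SYNC_MAX_RESTARTS times) if dquots are reclaimed
 * while the mplist lock is dropped for I/O.
 */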
  787. int
  788. xfs_qm_sync(
  789. struct xfs_mount *mp,
  790. int flags)
  791. {
  792. struct xfs_quotainfo *q = mp->m_quotainfo;
  793. int recl, restarts;
  794. struct xfs_dquot *dqp;
  795. int error;
  796. if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
  797. return 0;
  798. restarts = 0;
  799. again:
  800. mutex_lock(&q->qi_dqlist_lock);
  801. /*
  802. * dqpurge_all() also takes the mplist lock and iterates through all dquots
  803. * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
  804. * when we have the mplist lock, we know that dquots will be consistent
  805. * as long as we have it locked.
  806. */
  807. if (!XFS_IS_QUOTA_ON(mp)) {
  808. mutex_unlock(&q->qi_dqlist_lock);
  809. return 0;
  810. }
  811. ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
  812. list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
  813. /*
  814. * If this is vfs_sync calling, then skip the dquots that
  815. * don't 'seem' to be dirty, i.e. don't acquire dqlock.
  816. * This is very similar to what xfs_sync does with inodes.
  817. */
  818. if (flags & SYNC_TRYLOCK) {
  819. if (!XFS_DQ_IS_DIRTY(dqp))
  820. continue;
  821. if (!xfs_qm_dqlock_nowait(dqp))
  822. continue;
  823. } else {
  824. xfs_dqlock(dqp);
  825. }
  826. /*
  827. * Now, find out for sure if this dquot is dirty or not.
  828. */
  829. if (! XFS_DQ_IS_DIRTY(dqp)) {
  830. xfs_dqunlock(dqp);
  831. continue;
  832. }
  833. /* XXX a sentinel would be better */
  834. recl = q->qi_dqreclaims;
  835. if (!xfs_dqflock_nowait(dqp)) {
  836. if (flags & SYNC_TRYLOCK) {
  837. xfs_dqunlock(dqp);
  838. continue;
  839. }
  840. /*
  841. * If we can't grab the flush lock, the caller really
  842. * wanted us to give this our best shot, so see if we
  843. * can give the buffer a push before we wait
  844. * on the flush lock. At this point, we know that
  845. * even though the dquot is being flushed,
  846. * it has (new) dirty data.
  847. */
  848. xfs_qm_dqflock_pushbuf_wait(dqp);
  849. }
  850. /*
  851. * Let go of the mplist lock. We don't want to hold it
  852. * across a disk write
  853. */
  854. mutex_unlock(&q->qi_dqlist_lock);
  855. error = xfs_qm_dqflush(dqp, flags);
  856. xfs_dqunlock(dqp);
  857. if (error && XFS_FORCED_SHUTDOWN(mp))
  858. return 0; /* Need to prevent umount failure */
  859. else if (error)
  860. return error;
  861. mutex_lock(&q->qi_dqlist_lock);
  862. if (recl != q->qi_dqreclaims) {
  863. if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
  864. break;
  865. mutex_unlock(&q->qi_dqlist_lock);
  866. goto again;
  867. }
  868. }
  869. mutex_unlock(&q->qi_dqlist_lock);
  870. return 0;
  871. }
  872. /*
  873. * The hash chains and the mplist use the same xfs_dqhash structure as
  874. * their list head, but we can take the mplist qh_lock and one of the
  875. * hash qh_locks at the same time without any problem as they aren't
  876. * related.
  877. */
  878. static struct lock_class_key xfs_quota_mplist_class;
  879. /*
  880. * This initializes all the quota information that's kept in the
  881. * mount structure
  882. */
  883. STATIC int
  884. xfs_qm_init_quotainfo(
  885. xfs_mount_t *mp)
  886. {
  887. xfs_quotainfo_t *qinf;
  888. int error;
  889. xfs_dquot_t *dqp;
  890. ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  891. /*
  892. * Tell XQM that we exist as soon as possible.
  893. */
  894. if ((error = xfs_qm_hold_quotafs_ref(mp))) {
  895. return error;
  896. }
  897. qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
  898. /*
  899. * See if quotainodes are setup, and if not, allocate them,
  900. * and change the superblock accordingly.
  901. */
  902. if ((error = xfs_qm_init_quotainos(mp))) {
  903. kmem_free(qinf);
  904. mp->m_quotainfo = NULL;
  905. return error;
  906. }
  907. INIT_LIST_HEAD(&qinf->qi_dqlist);
  908. mutex_init(&qinf->qi_dqlist_lock);
  909. lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class);
  910. qinf->qi_dqreclaims = 0;
  911. /* mutex used to serialize quotaoffs */
  912. mutex_init(&qinf->qi_quotaofflock);
  913. /* Precalc some constants */
  914. qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
  915. ASSERT(qinf->qi_dqchunklen);
  916. qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
  917. do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));
  918. mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
  919. /*
  920. * We try to get the limits from the superuser's limits fields.
  921. * This is quite hacky, but it is standard quota practice.
  922. * We look at the USR dquot with id == 0 first, but if user quotas
  923. * are not enabled we go to the GRP dquot with id == 0.
  924. * We don't really care to keep separate default limits for user
  925. * and group quotas, at least not at this point.
  926. */
  927. error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
  928. XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
  929. (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
  930. XFS_DQ_PROJ),
  931. XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
  932. &dqp);
  933. if (! error) {
  934. xfs_disk_dquot_t *ddqp = &dqp->q_core;
  935. /*
  936. * The warnings and timers set the grace period given to
  937. * a user or group before he or she can no longer write.
  938. * If it is zero, a default is used.
  939. */
  940. qinf->qi_btimelimit = ddqp->d_btimer ?
  941. be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
  942. qinf->qi_itimelimit = ddqp->d_itimer ?
  943. be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
  944. qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
  945. be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
  946. qinf->qi_bwarnlimit = ddqp->d_bwarns ?
  947. be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
  948. qinf->qi_iwarnlimit = ddqp->d_iwarns ?
  949. be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
  950. qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
  951. be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
  952. qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
  953. qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
  954. qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
  955. qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
  956. qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
  957. qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
  958. /*
  959. * We sent the XFS_QMOPT_DQSUSER flag to dqget because
  960. * we don't want this dquot cached. We haven't done a
  961. * quotacheck yet, and quotacheck doesn't like incore dquots.
  962. */
  963. xfs_qm_dqdestroy(dqp);
  964. } else {
  965. qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
  966. qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
  967. qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
  968. qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
  969. qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
  970. qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
  971. }
  972. return 0;
  973. }
  974. /*
  975. * Gets called when unmounting a filesystem or when all quotas get
  976. * turned off.
  977. * This purges the quota inodes, destroys locks and frees itself.
  978. */
  979. void
  980. xfs_qm_destroy_quotainfo(
  981. xfs_mount_t *mp)
  982. {
  983. xfs_quotainfo_t *qi;
  984. qi = mp->m_quotainfo;
  985. ASSERT(qi != NULL);
  986. ASSERT(xfs_Gqm != NULL);
  987. /*
  988. * Release the reference that XQM kept, so that we know
  989. * when the XQM structure should be freed. We cannot assume
  990. * that xfs_Gqm is non-null after this point.
  991. */
  992. xfs_qm_rele_quotafs_ref(mp);
  993. ASSERT(list_empty(&qi->qi_dqlist));
  994. mutex_destroy(&qi->qi_dqlist_lock);
  995. if (qi->qi_uquotaip) {
  996. IRELE(qi->qi_uquotaip);
  997. qi->qi_uquotaip = NULL; /* paranoia */
  998. }
  999. if (qi->qi_gquotaip) {
  1000. IRELE(qi->qi_gquotaip);
  1001. qi->qi_gquotaip = NULL;
  1002. }
  1003. mutex_destroy(&qi->qi_quotaofflock);
  1004. kmem_free(qi);
  1005. mp->m_quotainfo = NULL;
  1006. }
  1007. /* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */
  1008. /* ARGSUSED */
  1009. STATIC void
  1010. xfs_qm_list_init(
  1011. xfs_dqlist_t *list,
  1012. char *str,
  1013. int n)
  1014. {
  1015. mutex_init(&list->qh_lock);
  1016. INIT_LIST_HEAD(&list->qh_list);
  1017. list->qh_version = 0;
  1018. list->qh_nelems = 0;
  1019. }
  1020. STATIC void
  1021. xfs_qm_list_destroy(
  1022. xfs_dqlist_t *list)
  1023. {
  1024. mutex_destroy(&(list->qh_lock));
  1025. }
  1026. /*
  1027. * Create an inode and return with a reference already taken, but unlocked.
  1028. * This is how we create quota inodes
  1029. */
  1030. STATIC int
  1031. xfs_qm_qino_alloc(
  1032. xfs_mount_t *mp,
  1033. xfs_inode_t **ip,
  1034. __int64_t sbfields,
  1035. uint flags)
  1036. {
  1037. xfs_trans_t *tp;
  1038. int error;
  1039. int committed;
  1040. tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
  1041. if ((error = xfs_trans_reserve(tp,
  1042. XFS_QM_QINOCREATE_SPACE_RES(mp),
  1043. XFS_CREATE_LOG_RES(mp), 0,
  1044. XFS_TRANS_PERM_LOG_RES,
  1045. XFS_CREATE_LOG_COUNT))) {
  1046. xfs_trans_cancel(tp, 0);
  1047. return error;
  1048. }
  1049. error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
  1050. if (error) {
  1051. xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
  1052. XFS_TRANS_ABORT);
  1053. return error;
  1054. }
  1055. /*
  1056. * Make the changes in the superblock, and log those too.
  1057. * sbfields arg may contain fields other than *QUOTINO;
  1058. * VERSIONNUM for example.
  1059. */
  1060. spin_lock(&mp->m_sb_lock);
  1061. if (flags & XFS_QMOPT_SBVERSION) {
  1062. ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
  1063. ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
  1064. XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
  1065. (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
  1066. XFS_SB_GQUOTINO | XFS_SB_QFLAGS));
  1067. xfs_sb_version_addquota(&mp->m_sb);
  1068. mp->m_sb.sb_uquotino = NULLFSINO;
  1069. mp->m_sb.sb_gquotino = NULLFSINO;
  1070. /* qflags will get updated _after_ quotacheck */
  1071. mp->m_sb.sb_qflags = 0;
  1072. }
  1073. if (flags & XFS_QMOPT_UQUOTA)
  1074. mp->m_sb.sb_uquotino = (*ip)->i_ino;
  1075. else
  1076. mp->m_sb.sb_gquotino = (*ip)->i_ino;
  1077. spin_unlock(&mp->m_sb_lock);
  1078. xfs_mod_sb(tp, sbfields);
  1079. if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
  1080. xfs_alert(mp, "%s failed (error %d)!", __func__, error);
  1081. return error;
  1082. }
  1083. return 0;
  1084. }
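/*
 * Zero the counters, timers and warning counts of every on-disk dquot in
 * the given buffer, repairing any damaged dquots along the way. Quotacheck
 * then rebuilds the usage figures from scratch.
 */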
  1085. STATIC void
  1086. xfs_qm_reset_dqcounts(
  1087. xfs_mount_t *mp,
  1088. xfs_buf_t *bp,
  1089. xfs_dqid_t id,
  1090. uint type)
  1091. {
  1092. xfs_disk_dquot_t *ddq;
  1093. int j;
  1094. trace_xfs_reset_dqcounts(bp, _RET_IP_);
  1095. /*
  1096. * Reset all counters and timers. They'll be
  1097. * started afresh by xfs_qm_quotacheck.
  1098. */
  1099. #ifdef DEBUG
  1100. j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
  1101. do_div(j, sizeof(xfs_dqblk_t));
  1102. ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
  1103. #endif
  1104. ddq = bp->b_addr;
  1105. for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
  1106. /*
  1107. * Do a sanity check, and if needed, repair the dqblk. Don't
  1108. * output any warnings because it's perfectly possible to
  1109. * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
  1110. */
  1111. (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
  1112. "xfs_quotacheck");
  1113. ddq->d_bcount = 0;
  1114. ddq->d_icount = 0;
  1115. ddq->d_rtbcount = 0;
  1116. ddq->d_btimer = 0;
  1117. ddq->d_itimer = 0;
  1118. ddq->d_rtbtimer = 0;
  1119. ddq->d_bwarns = 0;
  1120. ddq->d_iwarns = 0;
  1121. ddq->d_rtbwarns = 0;
  1122. ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
  1123. }
  1124. }
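/*
 * Walk the given range of quota-file blocks one buffer at a time, resetting
 * the dquot counters in each buffer and queueing it for delayed write.
 */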
  1125. STATIC int
  1126. xfs_qm_dqiter_bufs(
  1127. xfs_mount_t *mp,
  1128. xfs_dqid_t firstid,
  1129. xfs_fsblock_t bno,
  1130. xfs_filblks_t blkcnt,
  1131. uint flags)
  1132. {
  1133. xfs_buf_t *bp;
  1134. int error;
  1135. int type;
  1136. ASSERT(blkcnt > 0);
  1137. type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
  1138. (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
  1139. error = 0;
  1140. /*
  1141. * Blkcnt arg can be a very big number, and might even be
  1142. * larger than the log itself. So, we have to break it up into
  1143. * manageable-sized transactions.
  1144. * Note that we don't start a permanent transaction here; we might
  1145. * not be able to get a log reservation for the whole thing up front,
  1146. * and we don't really care to either, because we just discard
  1147. * everything if we were to crash in the middle of this loop.
  1148. */
  1149. while (blkcnt--) {
  1150. error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
  1151. XFS_FSB_TO_DADDR(mp, bno),
  1152. mp->m_quotainfo->qi_dqchunklen, 0, &bp);
  1153. if (error)
  1154. break;
  1155. xfs_qm_reset_dqcounts(mp, bp, firstid, type);
  1156. xfs_buf_delwri_queue(bp);
  1157. xfs_buf_relse(bp);
  1158. /*
  1159. * go to the next block.
  1160. */
  1161. bno++;
  1162. firstid += mp->m_quotainfo->qi_dqperchunk;
  1163. }
  1164. return error;
  1165. }
  1166. /*
  1167. * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
  1168. * caller supplied function for every chunk of dquots that we find.
  1169. */
  1170. STATIC int
  1171. xfs_qm_dqiterate(
  1172. xfs_mount_t *mp,
  1173. xfs_inode_t *qip,
  1174. uint flags)
  1175. {
  1176. xfs_bmbt_irec_t *map;
  1177. int i, nmaps; /* number of map entries */
  1178. int error; /* return value */
  1179. xfs_fileoff_t lblkno;
  1180. xfs_filblks_t maxlblkcnt;
  1181. xfs_dqid_t firstid;
  1182. xfs_fsblock_t rablkno;
  1183. xfs_filblks_t rablkcnt;
  1184. error = 0;
  1185. /*
  1186. * This looks racy, but we can't keep an inode lock across a
  1187. * trans_reserve. But, this gets called during quotacheck, and that
  1188. * happens only at mount time which is single threaded.
  1189. */
  1190. if (qip->i_d.di_nblocks == 0)
  1191. return 0;
  1192. map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
  1193. lblkno = 0;
  1194. maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
  1195. do {
  1196. nmaps = XFS_DQITER_MAP_SIZE;
  1197. /*
  1198. * We aren't changing the inode itself. Just changing
  1199. * some of its data. No new blocks are added here, and
  1200. * the inode is never added to the transaction.
  1201. */
  1202. xfs_ilock(qip, XFS_ILOCK_SHARED);
  1203. error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
  1204. map, &nmaps, 0);
  1205. xfs_iunlock(qip, XFS_ILOCK_SHARED);
  1206. if (error)
  1207. break;
  1208. ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
  1209. for (i = 0; i < nmaps; i++) {
  1210. ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
  1211. ASSERT(map[i].br_blockcount);
  1212. lblkno += map[i].br_blockcount;
  1213. if (map[i].br_startblock == HOLESTARTBLOCK)
  1214. continue;
  1215. firstid = (xfs_dqid_t) map[i].br_startoff *
  1216. mp->m_quotainfo->qi_dqperchunk;
  1217. /*
  1218. * Do a read-ahead on the next extent.
  1219. */
  1220. if ((i+1 < nmaps) &&
  1221. (map[i+1].br_startblock != HOLESTARTBLOCK)) {
  1222. rablkcnt = map[i+1].br_blockcount;
  1223. rablkno = map[i+1].br_startblock;
  1224. while (rablkcnt--) {
  1225. xfs_buf_readahead(mp->m_ddev_targp,
  1226. XFS_FSB_TO_DADDR(mp, rablkno),
  1227. mp->m_quotainfo->qi_dqchunklen);
  1228. rablkno++;
  1229. }
  1230. }
  1231. /*
  1232. * Iterate thru all the blks in the extent and
  1233. * reset the counters of all the dquots inside them.
  1234. */
  1235. if ((error = xfs_qm_dqiter_bufs(mp,
  1236. firstid,
  1237. map[i].br_startblock,
  1238. map[i].br_blockcount,
  1239. flags))) {
  1240. break;
  1241. }
  1242. }
  1243. if (error)
  1244. break;
  1245. } while (nmaps > 0);
  1246. kmem_free(map);
  1247. return error;
  1248. }
  1249. /*
  1250. * Called by dqusage_adjust in doing a quotacheck.
  1251. *
  1252. * Given the inode and a dquot id, this updates both the incore dquot as well
  1253. * as the buffer copy. This is so that once the quotacheck is done, we can
  1254. * just log all the buffers, as opposed to logging numerous updates to
  1255. * individual dquots.
  1256. */
  1257. STATIC int
  1258. xfs_qm_quotacheck_dqadjust(
  1259. struct xfs_inode *ip,
  1260. xfs_dqid_t id,
  1261. uint type,
  1262. xfs_qcnt_t nblks,
  1263. xfs_qcnt_t rtblks)
  1264. {
  1265. struct xfs_mount *mp = ip->i_mount;
  1266. struct xfs_dquot *dqp;
  1267. int error;
  1268. error = xfs_qm_dqget(mp, ip, id, type,
  1269. XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
  1270. if (error) {
  1271. /*
  1272. * Shouldn't be able to turn off quotas here.
  1273. */
  1274. ASSERT(error != ESRCH);
  1275. ASSERT(error != ENOENT);
  1276. return error;
  1277. }
  1278. trace_xfs_dqadjust(dqp);
  1279. /*
  1280. * Adjust the inode count and the block count to reflect this inode's
  1281. * resource usage.
  1282. */
  1283. be64_add_cpu(&dqp->q_core.d_icount, 1);
  1284. dqp->q_res_icount++;
  1285. if (nblks) {
  1286. be64_add_cpu(&dqp->q_core.d_bcount, nblks);
  1287. dqp->q_res_bcount += nblks;
  1288. }
  1289. if (rtblks) {
  1290. be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
  1291. dqp->q_res_rtbcount += rtblks;
  1292. }
  1293. /*
  1294. * Set default limits, adjust timers (since we changed usages)
  1295. *
  1296. * There are no timers for the default values set in the root dquot.
  1297. */
  1298. if (dqp->q_core.d_id) {
  1299. xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
  1300. xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
  1301. }
  1302. dqp->dq_flags |= XFS_DQ_DIRTY;
  1303. xfs_qm_dqput(dqp);
  1304. return 0;
  1305. }
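/*
 * Count the realtime blocks allocated to an inode by walking the extent
 * records in its data fork; the total is returned in *O_rtblks.
 */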
  1306. STATIC int
  1307. xfs_qm_get_rtblks(
  1308. xfs_inode_t *ip,
  1309. xfs_qcnt_t *O_rtblks)
  1310. {
  1311. xfs_filblks_t rtblks; /* total rt blks */
  1312. xfs_extnum_t idx; /* extent record index */
  1313. xfs_ifork_t *ifp; /* inode fork pointer */
  1314. xfs_extnum_t nextents; /* number of extent entries */
  1315. int error;
  1316. ASSERT(XFS_IS_REALTIME_INODE(ip));
  1317. ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
  1318. if (!(ifp->if_flags & XFS_IFEXTENTS)) {
  1319. if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
  1320. return error;
  1321. }
  1322. rtblks = 0;
  1323. nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
  1324. for (idx = 0; idx < nextents; idx++)
  1325. rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
  1326. *O_rtblks = (xfs_qcnt_t)rtblks;
  1327. return 0;
  1328. }
  1329. /*
  1330. * callback routine supplied to bulkstat(). Given an inumber, find its
  1331. * dquots and update them to account for resources taken by that inode.
  1332. */
  1333. /* ARGSUSED */
  1334. STATIC int
  1335. xfs_qm_dqusage_adjust(
  1336. xfs_mount_t *mp, /* mount point for filesystem */
  1337. xfs_ino_t ino, /* inode number to get data for */
  1338. void __user *buffer, /* not used */
  1339. int ubsize, /* not used */
  1340. int *ubused, /* not used */
  1341. int *res) /* result code value */
  1342. {
  1343. xfs_inode_t *ip;
  1344. xfs_qcnt_t nblks, rtblks = 0;
  1345. int error;
  1346. ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  1347. /*
  1348. * rootino must have its resources accounted for, not so with the quota
  1349. * inodes.
  1350. */
  1351. if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
  1352. *res = BULKSTAT_RV_NOTHING;
  1353. return XFS_ERROR(EINVAL);
  1354. }
  1355. /*
  1356. * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
  1357. * interface expects the inode to be exclusively locked because that's
  1358. * the case in all other instances. It's OK that we do this because
  1359. * quotacheck is done only at mount time.
  1360. */
  1361. error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
  1362. if (error) {
  1363. *res = BULKSTAT_RV_NOTHING;
  1364. return error;
  1365. }
  1366. ASSERT(ip->i_delayed_blks == 0);
  1367. if (XFS_IS_REALTIME_INODE(ip)) {
  1368. /*
  1369. * Walk thru the extent list and count the realtime blocks.
  1370. */
  1371. error = xfs_qm_get_rtblks(ip, &rtblks);
  1372. if (error)
  1373. goto error0;
  1374. }
  1375. nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
  1376. /*
  1377. * Add the (disk blocks and inode) resources occupied by this
  1378. * inode to its dquots. We do this adjustment in the incore dquot,
  1379. * and also copy the changes to its buffer.
  1380. * We don't care about putting these changes in a transaction
  1381. * envelope because if we crash in the middle of a 'quotacheck'
  1382. * we have to start from the beginning anyway.
  1383. * Once we're done, we'll log all the dquot bufs.
  1384. *
  1385. * The *QUOTA_ON checks below may look pretty racy, but quotachecks
  1386. * and quotaoffs don't race. (Quotachecks happen at mount time only).
  1387. */
  1388. if (XFS_IS_UQUOTA_ON(mp)) {
  1389. error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
  1390. XFS_DQ_USER, nblks, rtblks);
  1391. if (error)
  1392. goto error0;
  1393. }
  1394. if (XFS_IS_GQUOTA_ON(mp)) {
  1395. error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
  1396. XFS_DQ_GROUP, nblks, rtblks);
  1397. if (error)
  1398. goto error0;
  1399. }
  1400. if (XFS_IS_PQUOTA_ON(mp)) {
  1401. error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
  1402. XFS_DQ_PROJ, nblks, rtblks);
  1403. if (error)
  1404. goto error0;
  1405. }
  1406. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1407. IRELE(ip);
  1408. *res = BULKSTAT_RV_DIDONE;
  1409. return 0;
  1410. error0:
  1411. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1412. IRELE(ip);
  1413. *res = BULKSTAT_RV_GIVEUP;
  1414. return error;
  1415. }

/*
 * Walk through all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int		done, count, error;
	xfs_ino_t	lastino;
	size_t		structsz;
	xfs_inode_t	*uip, *gip;
	uint		flags;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * There should be no cached dquots. The (simplistic) quotacheck
	 * algorithm doesn't like that.
	 */
	ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go through all the dquots on disk, USR and GRP/PRJ, and
	 * reset their counters to zero. We need a clean slate.
	 * We don't log our changes until later.
	 */
	uip = mp->m_quotainfo->qi_uquotaip;
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	gip = mp->m_quotainfo->qi_gquotaip;
	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
					XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		if (error)
			goto error_return;
		flags |= XFS_OQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate through all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;
	} while (!done);

	/*
	 * We've made all the changes that we need to make incore.
	 * Flush them down to disk buffers if everything was updated
	 * successfully.
	 */
	if (!error)
		error = xfs_qm_dqflush_all(mp, 0);

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * We didn't log anything, because if we crashed, we'll have to
	 * start the quotacheck from scratch anyway. However, we must make
	 * sure that our dquot changes are secure before we put the
	 * quotacheck'd stamp on the superblock. So, here we do a synchronous
	 * flush.
	 */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
	mp->m_qflags |= flags;

error_return:
	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		ASSERT(xfs_Gqm != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");

	return error;
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uip, *gip;
	int		error;
	__int64_t	sbflags;
	uint		flags;

	ASSERT(mp->m_quotainfo);
	uip = gip = NULL;
	sbflags = 0;
	flags = 0;

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					      0, 0, &uip)))
				return XFS_ERROR(error);
		}
		if (XFS_IS_OQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					      0, 0, &gip))) {
				if (uip)
					IRELE(uip);
				return XFS_ERROR(error);
			}
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
	}

	/*
	 * Create the two inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below. If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		if ((error = xfs_qm_qino_alloc(mp, &uip,
					       sbflags | XFS_SB_UQUOTINO,
					       flags | XFS_QMOPT_UQUOTA)))
			return XFS_ERROR(error);

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
		flags |= (XFS_IS_GQUOTA_ON(mp) ?
				XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO, flags);
		if (error) {
			if (uip)
				IRELE(uip);
			return XFS_ERROR(error);
		}
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;

	return 0;
}

/*
 * Just pop the least recently used dquot off the freelist and
 * recycle it. The returned dquot is locked.
 */
STATIC xfs_dquot_t *
xfs_qm_dqreclaim_one(void)
{
	xfs_dquot_t	*dqpout;
	xfs_dquot_t	*dqp;
	int		restarts;
	int		startagain;

	restarts = 0;
	dqpout = NULL;

	/* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
again:
	startagain = 0;
	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);

	list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
		struct xfs_mount *mp = dqp->q_mount;
		xfs_dqlock(dqp);

		/*
		 * We are racing with dqlookup here. Naturally we don't
		 * want to reclaim a dquot that lookup wants. We release the
		 * freelist lock and start over, so that lookup will grab
		 * both the dquot and the freelistlock.
		 */
		if (dqp->dq_flags & XFS_DQ_WANT) {
			ASSERT(!(dqp->dq_flags & XFS_DQ_INACTIVE));

			trace_xfs_dqreclaim_want(dqp);
			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
			restarts++;
			startagain = 1;
			goto dqunlock;
		}

		/*
		 * If the dquot is inactive, we are assured that it is
		 * not on the mplist or the hashlist, and that makes our
		 * life easier.
		 */
		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
			ASSERT(mp == NULL);
			ASSERT(!XFS_DQ_IS_DIRTY(dqp));
			ASSERT(list_empty(&dqp->q_hashlist));
			ASSERT(list_empty(&dqp->q_mplist));
			list_del_init(&dqp->q_freelist);
			xfs_Gqm->qm_dqfrlist_cnt--;
			dqpout = dqp;
			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
			goto dqunlock;
		}

		ASSERT(dqp->q_hash);
		ASSERT(!list_empty(&dqp->q_mplist));

		/*
		 * Try to grab the flush lock. If this dquot is in the process
		 * of getting flushed to disk, we don't want to reclaim it.
		 */
		if (!xfs_dqflock_nowait(dqp))
			goto dqunlock;

		/*
		 * We have the flush lock so we know that this is not in the
		 * process of being flushed. So, if this is dirty, flush it
		 * DELWRI so that we don't get a freelist infested with
		 * dirty dquots.
		 */
		if (XFS_DQ_IS_DIRTY(dqp)) {
			int	error;

			trace_xfs_dqreclaim_dirty(dqp);

			/*
			 * We flush it delayed write, so don't bother
			 * releasing the freelist lock.
			 */
			error = xfs_qm_dqflush(dqp, 0);
			if (error) {
				xfs_warn(mp, "%s: dquot %p flush failed",
					 __func__, dqp);
			}
			goto dqunlock;
		}

		/*
		 * We're trying to get the hashlock out of order. This races
		 * with dqlookup; so, we give up and move on to the next dquot
		 * if we couldn't get the hashlock. This way, we won't starve
		 * a dqlookup process that holds the hashlock that is
		 * waiting for the freelist lock.
		 */
		if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
			restarts++;
			goto dqfunlock;
		}

		/*
		 * This races with dquot allocation code as well as dqflush_all
		 * and reclaim code. So, if we failed to grab the mplist lock,
		 * give up everything and start over.
		 */
		if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
			restarts++;
			startagain = 1;
			goto qhunlock;
		}

		ASSERT(dqp->q_nrefs == 0);
		list_del_init(&dqp->q_mplist);
		mp->m_quotainfo->qi_dquots--;
		mp->m_quotainfo->qi_dqreclaims++;
		list_del_init(&dqp->q_hashlist);
		dqp->q_hash->qh_version++;
		list_del_init(&dqp->q_freelist);
		xfs_Gqm->qm_dqfrlist_cnt--;
		dqpout = dqp;
		mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
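		/*
		 * Both the success path above and the failure gotos fall
		 * through the labels below, dropping whatever locks are still
		 * held in the reverse order in which they were taken.
		 */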
qhunlock:
		mutex_unlock(&dqp->q_hash->qh_lock);
dqfunlock:
		xfs_dqfunlock(dqp);
dqunlock:
		xfs_dqunlock(dqp);
		if (dqpout)
			break;
		if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
			break;
		if (startagain) {
			mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
			goto again;
		}
	}
	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
	return dqpout;
}

/*
 * Traverse the freelist of dquots and attempt to reclaim a maximum of
 * 'howmany' dquots. This operation races with dqlookup(), and attempts to
 * favor the lookup function ...
 */
STATIC int
xfs_qm_shake_freelist(
	int	howmany)
{
	int		nreclaimed = 0;
	xfs_dquot_t	*dqp;

	if (howmany <= 0)
		return 0;

	while (nreclaimed < howmany) {
		dqp = xfs_qm_dqreclaim_one();
		if (!dqp)
			return nreclaimed;
		xfs_qm_dqdestroy(dqp);
		nreclaimed++;
	}

	return nreclaimed;
}

/*
 * The kmem_shake interface is invoked when memory is running low.
 */
/* ARGSUSED */
STATIC int
xfs_qm_shake(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	int	ndqused, nfree, n;
	gfp_t	gfp_mask = sc->gfp_mask;

	if (!kmem_shake_allow(gfp_mask))
		return 0;
	if (!xfs_Gqm)
		return 0;

	nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */
	/* incore dquots in all f/s's */
	ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;

	ASSERT(ndqused >= 0);
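
	/*
	 * Nothing to do unless free dquots outnumber the in-use ones, or the
	 * freelist has itself grown past the ndquot high-water mark.
	 */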
	if (nfree <= ndqused && nfree < ndquot)
		return 0;

	ndqused *= xfs_Gqm->qm_dqfree_ratio;	/* target # of free dquots */
	n = nfree - ndqused - ndquot;		/* # over target */

	return xfs_qm_shake_freelist(MAX(nfree, n));
}

/*------------------------------------------------------------------*/

/*
 * Return a new incore dquot. Depending on the number of
 * dquots in the system, we either allocate a new one on the kernel heap,
 * or reclaim a free one.
 * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed
 * to reclaim an existing one from the freelist.
 */
boolean_t
xfs_qm_dqalloc_incore(
	xfs_dquot_t	**O_dqpp)
{
	xfs_dquot_t	*dqp;

	/*
	 * Check against high water mark to see if we want to pop
	 * a nincompoop dquot off the freelist.
	 */
	if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) {
		/*
		 * Try to recycle a dquot from the freelist.
		 */
		if ((dqp = xfs_qm_dqreclaim_one())) {
			XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
			/*
			 * Just zero the core here. The rest will get
			 * reinitialized by caller. XXX we shouldn't even
			 * do this zero ...
			 */
			memset(&dqp->q_core, 0, sizeof(dqp->q_core));
			*O_dqpp = dqp;
			return B_FALSE;
		}
		XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
	}

	/*
	 * Allocate a brand new dquot on the kernel heap and return it
	 * to the caller to initialize.
	 */
	ASSERT(xfs_Gqm->qm_dqzone != NULL);
	*O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
	atomic_inc(&xfs_Gqm->qm_totaldquots);

	return B_TRUE;
}

/*
 * Start a transaction and write the incore superblock changes to
 * disk. flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
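
	/*
	 * Reserve log space for the superblock write: one sector for the
	 * superblock itself plus a small amount of headroom.
	 */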
	if ((error = xfs_trans_reserve(tp, 0,
				       mp->m_sb.sb_sectsize + 128, 0,
				       0,
				       XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	error = xfs_trans_commit(tp, 0);

	return error;
}

/* --------------- utility functions for vnodeops ---------------- */

/*
 * Given an inode, a uid, gid and prid, make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	uid_t			uid,
	gid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq, *gq;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	uq = gq = NULL;
	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
						  XFS_DQ_USER,
						  XFS_QMOPT_DQALLOC |
						  XFS_QMOPT_DOWARN,
						  &uq))) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = ip->i_udquot;
			xfs_dqlock(uq);
			XFS_DQHOLD(uq);
			xfs_dqunlock(uq);
		}
	}
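
	/*
	 * Group and project quota are mutually exclusive here and share the
	 * gq return slot (and ip->i_gdquot), so only one of the two branches
	 * below runs.
	 */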
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
						  XFS_DQ_GROUP,
						  XFS_QMOPT_DQALLOC |
						  XFS_QMOPT_DOWARN,
						  &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = ip->i_gdquot;
			xfs_dqlock(gq);
			XFS_DQHOLD(gq);
			xfs_dqunlock(gq);
		}
	} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						  XFS_DQ_PROJ,
						  XFS_QMOPT_DQALLOC |
						  XFS_QMOPT_DOWARN,
						  &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = ip->i_gdquot;
			xfs_dqlock(gq);
			XFS_DQHOLD(gq);
			xfs_dqunlock(gq);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	return 0;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode
	 * is going to keep this dquot pointer even
	 * after the trans_commit.
	 */
	xfs_dqlock(newdq);
	XFS_DQHOLD(newdq);
	xfs_dqunlock(newdq);
	*IO_olddq = newdq;

	return prevdq;
}

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	*udqp,
	xfs_dquot_t	*gdqp,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		delblks, blkflags, prjflags = 0;
	xfs_dquot_t	*unresudq, *unresgdq, *delblksudq, *delblksgdq;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	delblksudq = delblksgdq = unresudq = unresgdq = NULL;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
		delblksudq = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			unresudq = ip->i_udquot;
		}
	}
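
	/*
	 * For project quota the gdqp slot carries the project dquot, and
	 * XFS_QMOPT_ENOSPC makes an over-quota project reservation fail
	 * with ENOSPC instead of EDQUOT.
	 */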
	if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
		if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
		    xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
			prjflags = XFS_QMOPT_ENOSPC;

		if (prjflags ||
		    (XFS_IS_GQUOTA_ON(ip->i_mount) &&
		     ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
			delblksgdq = gdqp;
			if (delblks) {
				ASSERT(ip->i_gdquot);
				unresgdq = ip->i_gdquot;
			}
		}
	}

	if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags)))
		return error;

	/*
	 * Do the delayed blks reservations/unreservations now. Since these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(delblksudq || delblksgdq);
		ASSERT(unresudq || unresgdq);
		if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
				flags | blkflags | prjflags)))
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
				blkflags);
	}

	return 0;
}
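
/*
 * Ensure that the inodes involved in a rename have their dquots attached.
 * i_tab can hold up to four inodes; consecutive duplicate entries are
 * skipped so each inode is only attached once.
 */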
int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}

	return 0;
}
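
/*
 * Attach the given dquots to a newly created inode: take a reference on
 * each, point the inode at them, and account for the new inode in the
 * transaction. The caller must hold the inode locked exclusively.
 */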
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp) {
		xfs_dqlock(udqp);
		XFS_DQHOLD(udqp);
		xfs_dqunlock(udqp);
		ASSERT(ip->i_udquot == NULL);
		ip->i_udquot = udqp;
		ASSERT(XFS_IS_UQUOTA_ON(mp));
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp) {
		xfs_dqlock(gdqp);
		XFS_DQHOLD(gdqp);
		xfs_dqunlock(gdqp);
		ASSERT(ip->i_gdquot == NULL);
		ip->i_gdquot = gdqp;
		ASSERT(XFS_IS_OQUOTA_ON(mp));
		ASSERT((XFS_IS_GQUOTA_ON(mp) ?
			ip->i_d.di_gid : xfs_get_projid(ip)) ==
		       be32_to_cpu(gdqp->q_core.d_id));
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}