dquot.c 67 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380
  1. /*
  2. * Implementation of the diskquota system for the LINUX operating system. QUOTA
  3. * is implemented using the BSD system call interface as the means of
  4. * communication with the user level. This file contains the generic routines
  5. * called by the different filesystems on allocation of an inode or block.
  6. * These routines take care of the administration needed to have a consistent
  7. * diskquota tracking system. The ideas of both user and group quotas are based
  8. * on the Melbourne quota system as used on BSD derived systems. The internal
  9. * implementation is based on one of the several variants of the LINUX
  10. * inode-subsystem with added complexity of the diskquota system.
  11. *
  12. * Author: Marco van Wieringen <mvw@planets.elm.net>
  13. *
  14. * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
  15. *
  16. * Revised list management to avoid races
  17. * -- Bill Hawes, <whawes@star.net>, 9/98
  18. *
  19. * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
  20. * As the consequence the locking was moved from dquot_decr_...(),
  21. * dquot_incr_...() to calling functions.
  22. * invalidate_dquots() now writes modified dquots.
  23. * Serialized quota_off() and quota_on() for mount point.
  24. * Fixed a few bugs in grow_dquots().
  25. * Fixed deadlock in write_dquot() - we no longer account quotas on
  26. * quota files
  27. * remove_dquot_ref() moved to inode.c - it now traverses through inodes
  28. * add_dquot_ref() restarts after blocking
  29. * Added check for bogus uid and fixed check for group in quotactl.
  30. * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
  31. *
  32. * Used struct list_head instead of own list struct
  33. * Invalidation of referenced dquots is no longer possible
  34. * Improved free_dquots list management
  35. * Quota and i_blocks are now updated in one place to avoid races
  36. * Warnings are now delayed so we won't block in critical section
  37. * Write updated not to require dquot lock
  38. * Jan Kara, <jack@suse.cz>, 9/2000
  39. *
  40. * Added dynamic quota structure allocation
  41. * Jan Kara <jack@suse.cz> 12/2000
  42. *
  43. * Rewritten quota interface. Implemented new quota format and
  44. * formats registering.
  45. * Jan Kara, <jack@suse.cz>, 2001,2002
  46. *
  47. * New SMP locking.
  48. * Jan Kara, <jack@suse.cz>, 10/2002
  49. *
  50. * Added journalled quota support, fix lock inversion problems
  51. * Jan Kara, <jack@suse.cz>, 2003,2004
  52. *
  53. * (C) Copyright 1994 - 1997 Marco van Wieringen
  54. */
  55. #include <linux/errno.h>
  56. #include <linux/kernel.h>
  57. #include <linux/fs.h>
  58. #include <linux/mount.h>
  59. #include <linux/mm.h>
  60. #include <linux/time.h>
  61. #include <linux/types.h>
  62. #include <linux/string.h>
  63. #include <linux/fcntl.h>
  64. #include <linux/stat.h>
  65. #include <linux/tty.h>
  66. #include <linux/file.h>
  67. #include <linux/slab.h>
  68. #include <linux/sysctl.h>
  69. #include <linux/init.h>
  70. #include <linux/module.h>
  71. #include <linux/proc_fs.h>
  72. #include <linux/security.h>
  73. #include <linux/kmod.h>
  74. #include <linux/namei.h>
  75. #include <linux/buffer_head.h>
  76. #include <linux/capability.h>
  77. #include <linux/quotaops.h>
  78. #include <linux/writeback.h> /* for inode_lock, oddly enough.. */
  79. #ifdef CONFIG_QUOTA_NETLINK_INTERFACE
  80. #include <net/netlink.h>
  81. #include <net/genetlink.h>
  82. #endif
  83. #include <asm/uaccess.h>
  84. #define __DQUOT_PARANOIA
  85. /*
  86. * There are two quota SMP locks. dq_list_lock protects all lists with quotas
  87. * and quota formats and also dqstats structure containing statistics about the
  88. * lists. dq_data_lock protects data from dq_dqb and also mem_dqinfo structures
  89. * and also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
  90. * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
  91. * in inode_add_bytes() and inode_sub_bytes().
  92. *
  93. * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock
  94. *
  95. * Note that some things (eg. sb pointer, type, id) doesn't change during
  96. * the life of the dquot structure and so needn't to be protected by a lock
  97. *
  98. * Any operation working on dquots via inode pointers must hold dqptr_sem. If
  99. * operation is just reading pointers from inode (or not using them at all) the
  100. * read lock is enough. If pointers are altered function must hold write lock
  101. * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
  102. * for altering the flag i_mutex is also needed). If operation is holding
  103. * reference to dquot in other way (e.g. quotactl ops) it must be guarded by
  104. * dqonoff_mutex.
  105. * This locking assures that:
  106. * a) update/access to dquot pointers in inode is serialized
  107. * b) everyone is guarded against invalidate_dquots()
  108. *
  109. * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
  110. * from inodes (dquot_alloc_space() and such don't check the dq_lock).
  111. * Currently dquot is locked only when it is being read to memory (or space for
  112. * it is being allocated) on the first dqget() and when it is being released on
  113. * the last dqput(). The allocation and release oparations are serialized by
  114. * the dq_lock and by checking the use count in dquot_release(). Write
  115. * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
  116. * spinlock to internal buffers before writing.
  117. *
  118. * Lock ordering (including related VFS locks) is the following:
  119. * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
  120. * dqio_mutex
  121. * i_mutex on quota files is special (it's below dqio_mutex)
  122. */
  123. static DEFINE_SPINLOCK(dq_list_lock);
  124. DEFINE_SPINLOCK(dq_data_lock);
  125. static char *quotatypes[] = INITQFNAMES;
  126. static struct quota_format_type *quota_formats; /* List of registered formats */
  127. static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
  128. /* SLAB cache for dquot structures */
  129. static struct kmem_cache *dquot_cachep;
  130. int register_quota_format(struct quota_format_type *fmt)
  131. {
  132. spin_lock(&dq_list_lock);
  133. fmt->qf_next = quota_formats;
  134. quota_formats = fmt;
  135. spin_unlock(&dq_list_lock);
  136. return 0;
  137. }
  138. void unregister_quota_format(struct quota_format_type *fmt)
  139. {
  140. struct quota_format_type **actqf;
  141. spin_lock(&dq_list_lock);
  142. for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next);
  143. if (*actqf)
  144. *actqf = (*actqf)->qf_next;
  145. spin_unlock(&dq_list_lock);
  146. }
/*
 * Look up a registered quota format by its format id and take a module
 * reference on its owning module.  If the format is not registered yet,
 * try to request_module() the module listed for this id in
 * module_names[] and search again.  Returns the format, or NULL if it
 * cannot be found/loaded.  The caller must drop the reference with
 * put_quota_format().
 */
static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;
	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;
		/* Format absent (or its module is being unloaded) - drop the
		 * lock and try to load the module implementing this id. */
		spin_unlock(&dq_list_lock);
		for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
		if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name))
			return NULL;
		spin_lock(&dq_list_lock);
		/* Search again - the loaded module should have registered
		 * itself via register_quota_format(). */
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}
/* Drop the module reference taken by find_quota_format(). */
static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
  170. /*
  171. * Dquot List Management:
  172. * The quota code uses three lists for dquot management: the inuse_list,
  173. * free_dquots, and dquot_hash[] array. A single dquot structure may be
  174. * on all three lists, depending on its current state.
  175. *
  176. * All dquots are placed to the end of inuse_list when first created, and this
  177. * list is used for invalidate operation, which must look at every dquot.
  178. *
  179. * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
  180. * and this list is searched whenever we need an available dquot. Dquots are
  181. * removed from the list as soon as they are used again, and
  182. * dqstats.free_dquots gives the number of dquots on the list. When
  183. * dquot is invalidated it's completely released from memory.
  184. *
  185. * Dquots with a specific identity (device, type and id) are placed on
  186. * one of the dquot_hash[] hash chains. The provides an efficient search
  187. * mechanism to locate a specific dquot.
  188. */
  189. static LIST_HEAD(inuse_list);
  190. static LIST_HEAD(free_dquots);
  191. static unsigned int dq_hash_bits, dq_hash_mask;
  192. static struct hlist_head *dquot_hash;
  193. struct dqstats dqstats;
  194. static inline unsigned int
  195. hashfn(const struct super_block *sb, unsigned int id, int type)
  196. {
  197. unsigned long tmp;
  198. tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
  199. return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
  200. }
  201. /*
  202. * Following list functions expect dq_list_lock to be held
  203. */
/* Insert dquot into the hash chain for its (sb, id, type) identity.
 * Caller must hold dq_list_lock. */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
	hlist_add_head(&dquot->dq_hash, head);
}
/* Unhash dquot; hlist_del_init() leaves the node reinitialized so a
 * repeated removal is harmless.  Caller must hold dq_list_lock. */
static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}
  213. static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type)
  214. {
  215. struct hlist_node *node;
  216. struct dquot *dquot;
  217. hlist_for_each (node, dquot_hash+hashent) {
  218. dquot = hlist_entry(node, struct dquot, dq_hash);
  219. if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type)
  220. return dquot;
  221. }
  222. return NODQUOT;
  223. }
/* Add a dquot to the tail of the free list and account for it in
 * dqstats.  Caller must hold dq_list_lock. */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats.free_dquots++;
}
  230. static inline void remove_free_dquot(struct dquot *dquot)
  231. {
  232. if (list_empty(&dquot->dq_free))
  233. return;
  234. list_del_init(&dquot->dq_free);
  235. dqstats.free_dquots--;
  236. }
/* Account a freshly created dquot on the global inuse list.
 * Caller must hold dq_list_lock. */
static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats.allocated_dquots++;
}
/* Drop a dquot from the inuse list just before it is destroyed.
 * Caller must hold dq_list_lock. */
static inline void remove_inuse(struct dquot *dquot)
{
	dqstats.allocated_dquots--;
	list_del(&dquot->dq_inuse);
}
  249. /*
  250. * End of list functions needing dq_list_lock
  251. */
/*
 * Wait until the dquot is no longer being initialized or released:
 * taking and immediately dropping dq_lock blocks until whoever holds
 * the mutex (dquot_acquire()/dquot_release()) is finished.
 */
static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}
/* Nonzero iff the dquot has modifications not yet written to disk. */
static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}
/* Dispatch dirtying to the filesystem's ->mark_dirty operation so
 * journalled quota formats can hook it. */
static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}
/*
 * Default ->mark_dirty implementation: set DQ_MOD_B and, on the 0->1
 * transition only, queue the dquot on its quota type's dirty list.
 * Always returns 0.
 */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_type].dqi_dirty_list);
	spin_unlock(&dq_list_lock);
	return 0;
}
  274. /* This function needs dq_list_lock */
  275. static inline int clear_dquot_dirty(struct dquot *dquot)
  276. {
  277. if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
  278. return 0;
  279. list_del_init(&dquot->dq_dirty);
  280. return 1;
  281. }
/* Flag the per-type quota file info as needing a write-out. */
void mark_info_dirty(struct super_block *sb, int type)
{
	set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
}
EXPORT_SYMBOL(mark_info_dirty);
/*
 * Read dquot from disk and alloc space for it
 *
 * Serialized against itself and dquot_release() by dq_lock; all file
 * I/O happens under dqio_mutex.  On success the dquot ends up with
 * DQ_READ_B and DQ_ACTIVE_B set.  Returns 0 or a negative error from
 * the format's read/commit/write_file_info callbacks.
 */
int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
	mutex_lock(&dquot->dq_lock);
	mutex_lock(&dqopt->dqio_mutex);
	/* Only read from disk once; DQ_READ_B survives later failures. */
	if (!test_bit(DQ_READ_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
	if (ret < 0)
		goto out_iolock;
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed: no on-disk offset yet means the
	 * structure must first be allocated in the quota file. */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_type]))
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	mutex_unlock(&dqopt->dqio_mutex);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
/*
 * Write dquot to disk
 *
 * Clears the dirty state first (under dq_list_lock); if the dquot was
 * not dirty there is nothing to do.  The actual write happens under
 * dqio_mutex via the format's commit_dqblk, followed by the quota file
 * info if that is dirty too.  Returns 0 or a negative error.
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
	mutex_lock(&dqopt->dqio_mutex);
	spin_lock(&dq_list_lock);
	if (!clear_dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		goto out_sem;
	}
	spin_unlock(&dq_list_lock);
	/* Inactive dquot can be only if there was error during read/init
	 * => we have better not writing it */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
		if (info_dirty(&dqopt->info[dquot->dq_type]))
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
		/* Report the info-write error only if the commit succeeded. */
		if (ret >= 0)
			ret = ret2;
	}
out_sem:
	mutex_unlock(&dqopt->dqio_mutex);
	return ret;
}
/*
 * Release dquot
 *
 * Frees the dquot's on-disk presence via the format's release_dqblk
 * and clears DQ_ACTIVE_B.  Bails out early (without clearing the flag)
 * if another dqget() raced in and raised the use count above 1.
 * Returns 0 or a negative error from the format callbacks.
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
	mutex_lock(&dquot->dq_lock);
	/* Check whether we are not racing with some other dqget() */
	if (atomic_read(&dquot->dq_count) > 1)
		goto out_dqlock;
	mutex_lock(&dqopt->dqio_mutex);
	/* release_dqblk is optional for a quota format. */
	if (dqopt->ops[dquot->dq_type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_type]))
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
	mutex_unlock(&dqopt->dqio_mutex);
out_dqlock:
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
/* Default ->destroy_dquot: return the structure to the slab cache. */
static void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
/* Destroy a dquot through the filesystem's ->destroy_dquot operation. */
static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}
/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list). We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;
restart:
	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		/* Skip dquots belonging to other filesystems/quota types. */
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			DEFINE_WAIT(wait);
			/* Take our own reference so the dquot cannot vanish
			 * while we sleep. */
			atomic_inc(&dquot->dq_count);
			prepare_to_wait(&dquot->dq_wait_unused, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&dq_list_lock);
			/* Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			if (atomic_read(&dquot->dq_count) > 1)
				schedule();
			finish_wait(&dquot->dq_wait_unused, &wait);
			dqput(dquot);
			/* At this moment dquot() need not exist (it could be
			 * reclaimed by prune_dqcache(). Hence we must
			 * restart. */
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}
/* Call callback for every active dquot on given filesystem
 *
 * Walks inuse_list under dq_list_lock, pins each active dquot with a
 * reference, drops the lock and invokes fn(dquot, priv).  The previous
 * dquot's reference is released only after the next one is pinned, so
 * the list position stays valid across the lock drop.  Stops and
 * returns fn's value on the first negative result, otherwise 0.
 */
int dquot_scan_active(struct super_block *sb,
		int (*fn)(struct dquot *dquot, unsigned long priv),
		unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;
	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		dqstats.lookups++;
		spin_unlock(&dq_list_lock);
		dqput(old_dquot);
		old_dquot = dquot;
		ret = fn(dquot, priv);
		if (ret < 0)
			goto out;
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return ret;
}
/*
 * Write all dirty dquots (and dirty quota file info) of the given type
 * to disk; type == -1 means all quota types.  Serialized against
 * quotaon/quotaoff by dqonoff_mutex.  Always returns 0.
 */
int vfs_quota_sync(struct super_block *sb, int type)
{
	struct list_head *dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	mutex_lock(&dqopt->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		dirty = &dqopt->info[cnt].dqi_dirty_list;
		/* Drain the dirty list; each write_dquot() removes the dquot
		 * from it via clear_dquot_dirty(). */
		while (!list_empty(dirty)) {
			dquot = list_first_entry(dirty, struct dquot, dq_dirty);
			/* Dirty and inactive can be only bad dquot... */
			if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
				clear_dquot_dirty(dquot);
				continue;
			}
			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			atomic_inc(&dquot->dq_count);
			dqstats.lookups++;
			spin_unlock(&dq_list_lock);
			sb->dq_op->write_dquot(dquot);
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}
	/* Write out dirty per-type quota file info as well. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	spin_lock(&dq_list_lock);
	dqstats.syncs++;
	spin_unlock(&dq_list_lock);
	mutex_unlock(&dqopt->dqonoff_mutex);
	return 0;
}
  507. /* Free unused dquots from cache */
  508. static void prune_dqcache(int count)
  509. {
  510. struct list_head *head;
  511. struct dquot *dquot;
  512. head = free_dquots.prev;
  513. while (head != &free_dquots && count) {
  514. dquot = list_entry(head, struct dquot, dq_free);
  515. remove_dquot_hash(dquot);
  516. remove_free_dquot(dquot);
  517. remove_inuse(dquot);
  518. do_destroy_dquot(dquot);
  519. count--;
  520. head = free_dquots.prev;
  521. }
  522. }
  523. /*
  524. * This is called from kswapd when we think we need some
  525. * more memory
  526. */
  527. static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
  528. {
  529. if (nr) {
  530. spin_lock(&dq_list_lock);
  531. prune_dqcache(nr);
  532. spin_unlock(&dq_list_lock);
  533. }
  534. return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
  535. }
/* Shrinker registered with the VM so unused dquots are reclaimed under
 * memory pressure. */
static struct shrinker dqcache_shrinker = {
	.shrink = shrink_dqcache_memory,
	.seeks = DEFAULT_SEEKS,
};
/*
 * Put reference to dquot
 * NOTE: If you change this function please check whether dqput_blocks() works right...
 * MUST be called with either dqptr_sem or dqonoff_mutex held
 *
 * Dropping the last reference may require writing the dquot (if dirty)
 * and releasing it through ->release_dquot(); both drop dq_list_lock,
 * so the function loops back to we_slept and rechecks the state each
 * time.  A dquot whose count reaches zero ends up on the free list.
 */
void dqput(struct dquot *dquot)
{
	int ret;
	if (!dquot)
		return;
#ifdef __DQUOT_PARANOIA
	/* Catch double-put: the caller must own a reference. */
	if (!atomic_read(&dquot->dq_count)) {
		printk("VFS: dqput: trying to free free dquot\n");
		printk("VFS: device %s, dquot of %s %d\n",
			dquot->dq_sb->s_id,
			quotatypes[dquot->dq_type],
			dquot->dq_id);
		BUG();
	}
#endif
	spin_lock(&dq_list_lock);
	dqstats.drops++;
	spin_unlock(&dq_list_lock);
we_slept:
	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? Wake up the waiter
		 * in invalidate_dquots() once only its reference remains. */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot->dq_wait_unused);
		spin_unlock(&dq_list_lock);
		return;
	}
	/* Need to release dquot? */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		/* Commit dquot before releasing */
		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
		if (ret < 0) {
			printk(KERN_ERR "VFS: cannot write quota structure on "
				"device %s (error %d). Quota may get out of "
				"sync!\n", dquot->dq_sb->s_id, ret);
			/*
			 * We clear dirty bit anyway, so that we avoid
			 * infinite loop here
			 */
			spin_lock(&dq_list_lock);
			clear_dquot_dirty(dquot);
			spin_unlock(&dq_list_lock);
		}
		goto we_slept;
	}
	/* Clear flag in case dquot was inactive (something bad happened) */
	clear_dquot_dirty(dquot);
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		dquot->dq_sb->dq_op->release_dquot(dquot);
		goto we_slept;
	}
	atomic_dec(&dquot->dq_count);
#ifdef __DQUOT_PARANOIA
	/* sanity check */
	BUG_ON(!list_empty(&dquot->dq_free));
#endif
	put_dquot_last(dquot);
	spin_unlock(&dq_list_lock);
}
/* Default ->alloc_dquot: allocate a zero-filled dquot from the slab
 * cache; GFP_NOFS avoids recursing into the filesystem on reclaim. */
static struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
  613. static struct dquot *get_empty_dquot(struct super_block *sb, int type)
  614. {
  615. struct dquot *dquot;
  616. dquot = sb->dq_op->alloc_dquot(sb, type);
  617. if(!dquot)
  618. return NODQUOT;
  619. mutex_init(&dquot->dq_lock);
  620. INIT_LIST_HEAD(&dquot->dq_free);
  621. INIT_LIST_HEAD(&dquot->dq_inuse);
  622. INIT_HLIST_NODE(&dquot->dq_hash);
  623. INIT_LIST_HEAD(&dquot->dq_dirty);
  624. init_waitqueue_head(&dquot->dq_wait_unused);
  625. dquot->dq_sb = sb;
  626. dquot->dq_type = type;
  627. atomic_set(&dquot->dq_count, 1);
  628. return dquot;
  629. }
  630. /*
  631. * Check whether dquot is in memory.
  632. * MUST be called with either dqptr_sem or dqonoff_mutex held
  633. */
  634. int dquot_is_cached(struct super_block *sb, unsigned int id, int type)
  635. {
  636. unsigned int hashent = hashfn(sb, id, type);
  637. int ret = 0;
  638. if (!sb_has_quota_active(sb, type))
  639. return 0;
  640. spin_lock(&dq_list_lock);
  641. if (find_dquot(hashent, sb, id, type) != NODQUOT)
  642. ret = 1;
  643. spin_unlock(&dq_list_lock);
  644. return ret;
  645. }
/*
 * Get a reference to the dquot for (sb, id, type), allocating, hashing
 * and reading it from disk if needed.  Returns NODQUOT when quota is
 * not active for this type or the on-disk structure cannot be acquired.
 * MUST be called with either dqptr_sem or dqonoff_mutex held.
 */
struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
	unsigned int hashent = hashfn(sb, id, type);
	struct dquot *dquot, *empty = NODQUOT;

	if (!sb_has_quota_active(sb, type))
		return NODQUOT;
we_slept:
	spin_lock(&dq_list_lock);
	if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) {
		if (empty == NODQUOT) {
			/* Allocate outside dq_list_lock, then retry the
			 * lookup - somebody may have inserted the entry
			 * while we slept. */
			spin_unlock(&dq_list_lock);
			if ((empty = get_empty_dquot(sb, type)) == NODQUOT)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		dquot->dq_id = id;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		dqstats.lookups++;
		spin_unlock(&dq_list_lock);
	} else {
		/* Cache hit: pull it off the free list if it had no users
		 * and take our reference. */
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		dqstats.cache_hits++;
		dqstats.lookups++;
		spin_unlock(&dq_list_lock);
		/* We lost the insertion race - discard our spare dquot */
		if (empty)
			do_destroy_dquot(empty);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is already
	 * finished or it will be canceled due to dq_count > 1 test */
	wait_on_dquot(dquot);
	/* Read the dquot and instantiate it (everything done only if needed) */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) {
		dqput(dquot);
		return NODQUOT;
	}
#ifdef __DQUOT_PARANOIA
	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
#endif
	return dquot;
}
  696. static int dqinit_needed(struct inode *inode, int type)
  697. {
  698. int cnt;
  699. if (IS_NOQUOTA(inode))
  700. return 0;
  701. if (type != -1)
  702. return inode->i_dquot[type] == NODQUOT;
  703. for (cnt = 0; cnt < MAXQUOTAS; cnt++)
  704. if (inode->i_dquot[cnt] == NODQUOT)
  705. return 1;
  706. return 0;
  707. }
/*
 * Attach quota structures of @type to every inode of @sb that is open
 * for writing and still lacks them.
 */
/* This routine is guarded by dqonoff_mutex mutex */
static void add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&inode_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/* Only inodes somebody can write to need quota charged */
		if (!atomic_read(&inode->i_writecount))
			continue;
		if (!dqinit_needed(inode, type))
			continue;
		/* Skip inodes that are being torn down */
		if (inode->i_state & (I_FREEING|I_WILL_FREE))
			continue;
		/* Pin the inode so we can drop inode_lock and block below */
		__iget(inode);
		spin_unlock(&inode_lock);
		iput(old_inode);
		sb->dq_op->initialize(inode, type);
		/* We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the inode_lock.
		 * We cannot iput the inode now as we can be holding the last
		 * reference and we cannot iput it under inode_lock. So we
		 * keep the reference and iput it later. */
		old_inode = inode;
		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}
  735. /* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */
  736. static inline int dqput_blocks(struct dquot *dquot)
  737. {
  738. if (atomic_read(&dquot->dq_count) <= 1)
  739. return 1;
  740. return 0;
  741. }
  742. /* Remove references to dquots from inode - add dquot to list for freeing if needed */
  743. /* We can't race with anybody because we hold dqptr_sem for writing... */
  744. static int remove_inode_dquot_ref(struct inode *inode, int type,
  745. struct list_head *tofree_head)
  746. {
  747. struct dquot *dquot = inode->i_dquot[type];
  748. inode->i_dquot[type] = NODQUOT;
  749. if (dquot != NODQUOT) {
  750. if (dqput_blocks(dquot)) {
  751. #ifdef __DQUOT_PARANOIA
  752. if (atomic_read(&dquot->dq_count) != 1)
  753. printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
  754. #endif
  755. spin_lock(&dq_list_lock);
  756. list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */
  757. spin_unlock(&dq_list_lock);
  758. return 1;
  759. }
  760. else
  761. dqput(dquot); /* We have guaranteed we won't block */
  762. }
  763. return 0;
  764. }
  765. /* Free list of dquots - called from inode.c */
  766. /* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */
  767. static void put_dquot_list(struct list_head *tofree_head)
  768. {
  769. struct list_head *act_head;
  770. struct dquot *dquot;
  771. act_head = tofree_head->next;
  772. /* So now we have dquots on the list... Just free them */
  773. while (act_head != tofree_head) {
  774. dquot = list_entry(act_head, struct dquot, dq_free);
  775. act_head = act_head->next;
  776. list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... */
  777. dqput(dquot);
  778. }
  779. }
  780. static void remove_dquot_ref(struct super_block *sb, int type,
  781. struct list_head *tofree_head)
  782. {
  783. struct inode *inode;
  784. spin_lock(&inode_lock);
  785. list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
  786. if (!IS_NOQUOTA(inode))
  787. remove_inode_dquot_ref(inode, type, tofree_head);
  788. }
  789. spin_unlock(&inode_lock);
  790. }
  791. /* Gather all references from inodes and drop them */
  792. static void drop_dquot_ref(struct super_block *sb, int type)
  793. {
  794. LIST_HEAD(tofree_head);
  795. if (sb->dq_op) {
  796. down_write(&sb_dqopt(sb)->dqptr_sem);
  797. remove_dquot_ref(sb, type, &tofree_head);
  798. up_write(&sb_dqopt(sb)->dqptr_sem);
  799. put_dquot_list(&tofree_head);
  800. }
  801. }
/* Charge @number inodes to @dquot; caller holds dq_data_lock. */
static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curinodes += number;
}
/* Charge @number bytes of space to @dquot; caller holds dq_data_lock. */
static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curspace += number;
}
  810. static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
  811. {
  812. if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
  813. dquot->dq_dqb.dqb_curinodes >= number)
  814. dquot->dq_dqb.dqb_curinodes -= number;
  815. else
  816. dquot->dq_dqb.dqb_curinodes = 0;
  817. if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
  818. dquot->dq_dqb.dqb_itime = (time_t) 0;
  819. clear_bit(DQ_INODES_B, &dquot->dq_flags);
  820. }
  821. static inline void dquot_decr_space(struct dquot *dquot, qsize_t number)
  822. {
  823. if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
  824. dquot->dq_dqb.dqb_curspace >= number)
  825. dquot->dq_dqb.dqb_curspace -= number;
  826. else
  827. dquot->dq_dqb.dqb_curspace = 0;
  828. if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
  829. dquot->dq_dqb.dqb_btime = (time_t) 0;
  830. clear_bit(DQ_BLKS_B, &dquot->dq_flags);
  831. }
  832. static int warning_issued(struct dquot *dquot, const int warntype)
  833. {
  834. int flag = (warntype == QUOTA_NL_BHARDWARN ||
  835. warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
  836. ((warntype == QUOTA_NL_IHARDWARN ||
  837. warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
  838. if (!flag)
  839. return 0;
  840. return test_and_set_bit(flag, &dquot->dq_flags);
  841. }
  842. #ifdef CONFIG_PRINT_QUOTA_WARNING
/* When nonzero, quota warnings are written to the offender's tty */
static int flag_print_warnings = 1;

/*
 * Should the current task be warned about exceeding @dquot?  Only the
 * owning user (USRQUOTA) or a member of the owning group (GRPQUOTA)
 * gets the message.
 */
static inline int need_print_warning(struct dquot *dquot)
{
	if (!flag_print_warnings)
		return 0;

	switch (dquot->dq_type) {
	case USRQUOTA:
		return current_fsuid() == dquot->dq_id;
	case GRPQUOTA:
		return in_group_p(dquot->dq_id);
	}
	return 0;
}
/* Print warning to user which exceeded quota */
static void print_warning(struct dquot *dquot, const int warntype)
{
	char *msg = NULL;
	struct tty_struct *tty;

	/* "Back below limit" events are informational - never printed */
	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot))
		return;

	tty = get_current_tty();
	if (!tty)
		return;		/* No controlling tty to write to */
	tty_write_message(tty, dquot->dq_sb->s_id);
	/* Soft-limit warnings are advisory; everything else failed a write */
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[dquot->dq_type]);
	switch (warntype) {
	case QUOTA_NL_IHARDWARN:
		msg = " file limit reached.\r\n";
		break;
	case QUOTA_NL_ISOFTLONGWARN:
		msg = " file quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_ISOFTWARN:
		msg = " file quota exceeded.\r\n";
		break;
	case QUOTA_NL_BHARDWARN:
		msg = " block limit reached.\r\n";
		break;
	case QUOTA_NL_BSOFTLONGWARN:
		msg = " block quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_BSOFTWARN:
		msg = " block quota exceeded.\r\n";
		break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);	/* drop the reference from get_current_tty() */
}
  898. #endif
  899. #ifdef CONFIG_QUOTA_NETLINK_INTERFACE
/* Netlink family structure for quota */
static struct genl_family quota_genl_family = {
	/* Let generic netlink pick a free family id at registration */
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,	/* no family-specific header */
	.name = "VFS_DQUOT",
	.version = 1,
	.maxattr = QUOTA_NL_A_MAX,
};
/* Send warning to userspace about user which exceeded quota */
static void send_warning(const struct dquot *dquot, const char warntype)
{
	static atomic_t seq;	/* netlink message sequence counter */
	struct sk_buff *skb;
	void *msg_head;
	int ret;
	/* Room for the four u32 and two u64 attributes put below */
	int msg_size = 4 * nla_total_size(sizeof(u32)) +
		       2 * nla_total_size(sizeof(u64));

	/* We have to allocate using GFP_NOFS as we are called from a
	 * filesystem performing write and thus further recursion into
	 * the fs to free some data could cause deadlocks. */
	skb = genlmsg_new(msg_size, GFP_NOFS);
	if (!skb) {
		printk(KERN_ERR
		       "VFS: Not enough memory to send quota warning.\n");
		return;
	}
	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
			       &quota_genl_family, 0, QUOTA_NL_C_WARNING);
	if (!msg_head) {
		printk(KERN_ERR
		       "VFS: Cannot store netlink header in quota warning.\n");
		goto err_out;
	}
	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
			  MAJOR(dquot->dq_sb->s_dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
			  MINOR(dquot->dq_sb->s_dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
	if (ret)
		goto attr_err_out;
	genlmsg_end(skb, msg_head);
	/* -ESRCH only means nobody is listening - not worth a message */
	ret = genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
	if (ret < 0 && ret != -ESRCH)
		printk(KERN_ERR
		       "VFS: Failed to send notification message: %d\n", ret);
	return;
attr_err_out:
	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
	kfree_skb(skb);
}
  964. #endif
  965. static inline void flush_warnings(struct dquot * const *dquots, char *warntype)
  966. {
  967. int i;
  968. for (i = 0; i < MAXQUOTAS; i++)
  969. if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN &&
  970. !warning_issued(dquots[i], warntype[i])) {
  971. #ifdef CONFIG_PRINT_QUOTA_WARNING
  972. print_warning(dquots[i], warntype[i]);
  973. #endif
  974. #ifdef CONFIG_QUOTA_NETLINK_INTERFACE
  975. send_warning(dquots[i], warntype[i]);
  976. #endif
  977. }
  978. }
  979. static inline char ignore_hardlimit(struct dquot *dquot)
  980. {
  981. struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
  982. return capable(CAP_SYS_RESOURCE) &&
  983. (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH));
  984. }
/*
 * Can @inodes more inodes be charged to @dquot?  Returns QUOTA_OK or
 * NO_QUOTA and sets *warntype to the warning that should be issued.
 * May start the inode grace period as a side effect.
 */
/* needs dq_data_lock */
static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
{
	*warntype = QUOTA_NL_NOWARN;
	/* Fake dquots and disabled limits never refuse an allocation */
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return QUOTA_OK;

	/* Hard limit would be exceeded */
	if (dquot->dq_dqb.dqb_ihardlimit &&
	    (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		*warntype = QUOTA_NL_IHARDWARN;
		return NO_QUOTA;
	}

	/* Soft limit exceeded and the grace period already ran out */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		*warntype = QUOTA_NL_ISOFTLONGWARN;
		return NO_QUOTA;
	}

	/* First crossing of the soft limit: warn and start the grace time */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		*warntype = QUOTA_NL_ISOFTWARN;
		dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
	}

	return QUOTA_OK;
}
/*
 * Can @space more bytes be charged to @dquot?  Returns QUOTA_OK or
 * NO_QUOTA and sets *warntype; @prealloc suppresses warnings (and
 * forbids starting a grace period) for speculative preallocations.
 */
/* needs dq_data_lock */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
{
	*warntype = QUOTA_NL_NOWARN;
	/* Fake dquots and disabled limits never refuse an allocation */
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return QUOTA_OK;

	/* Hard limit would be exceeded */
	if (dquot->dq_dqb.dqb_bhardlimit &&
	    dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			*warntype = QUOTA_NL_BHARDWARN;
		return NO_QUOTA;
	}

	/* Soft limit exceeded and the grace period already ran out */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (!prealloc)
			*warntype = QUOTA_NL_BSOFTLONGWARN;
		return NO_QUOTA;
	}

	/* First crossing of the soft limit */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (!prealloc) {
			*warntype = QUOTA_NL_BSOFTWARN;
			dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
		}
		else
			/*
			 * We don't allow preallocation to exceed softlimit so exceeding will
			 * be always printed
			 */
			return NO_QUOTA;
	}

	return QUOTA_OK;
}
  1051. static int info_idq_free(struct dquot *dquot, qsize_t inodes)
  1052. {
  1053. if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
  1054. dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
  1055. !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
  1056. return QUOTA_NL_NOWARN;
  1057. if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit)
  1058. return QUOTA_NL_ISOFTBELOW;
  1059. if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
  1060. dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit)
  1061. return QUOTA_NL_IHARDBELOW;
  1062. return QUOTA_NL_NOWARN;
  1063. }
  1064. static int info_bdq_free(struct dquot *dquot, qsize_t space)
  1065. {
  1066. if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
  1067. dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
  1068. return QUOTA_NL_NOWARN;
  1069. if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
  1070. return QUOTA_NL_BSOFTBELOW;
  1071. if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
  1072. dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
  1073. return QUOTA_NL_BHARDBELOW;
  1074. return QUOTA_NL_NOWARN;
  1075. }
/*
 * Initialize quota pointers in inode
 * Transaction must be started at entry
 */
int dquot_initialize(struct inode *inode, int type)
{
	unsigned int id = 0;
	int cnt, ret = 0;

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (IS_NOQUOTA(inode))
		return 0;
	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	/* Having dqptr_sem we know NOQUOTA flags can't be altered... */
	if (IS_NOQUOTA(inode))
		goto out_err;
	/* type == -1 means: initialize all quota types */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Never replace an already attached dquot */
		if (inode->i_dquot[cnt] == NODQUOT) {
			switch (cnt) {
			case USRQUOTA:
				id = inode->i_uid;
				break;
			case GRPQUOTA:
				id = inode->i_gid;
				break;
			}
			inode->i_dquot[cnt] = dqget(inode->i_sb, id, cnt);
		}
	}
out_err:
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return ret;
}
  1111. /*
  1112. * Release all quotas referenced by inode
  1113. * Transaction must be started at an entry
  1114. */
  1115. int dquot_drop_locked(struct inode *inode)
  1116. {
  1117. int cnt;
  1118. for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
  1119. if (inode->i_dquot[cnt] != NODQUOT) {
  1120. dqput(inode->i_dquot[cnt]);
  1121. inode->i_dquot[cnt] = NODQUOT;
  1122. }
  1123. }
  1124. return 0;
  1125. }
/* Take dqptr_sem for writing and drop all quota references of @inode. */
int dquot_drop(struct inode *inode)
{
	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	dquot_drop_locked(inode);
	up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return 0;
}
/* Wrapper to remove references to quota structures from inode */
void vfs_dq_drop(struct inode *inode)
{
	/* Here we can get arbitrary inode from clear_inode() so we have
	 * to be careful. OTOH we don't need locking as quota operations
	 * are allowed to change only at mount time */
	if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op
	    && inode->i_sb->dq_op->drop) {
		int cnt;
		/* Test before calling to rule out calls from proc and such
		 * where we are not allowed to block. Note that this is
		 * actually reliable test even without the lock - the caller
		 * must assure that nobody can come after the DQUOT_DROP and
		 * add quota pointers back anyway */
		for (cnt = 0; cnt < MAXQUOTAS; cnt++)
			if (inode->i_dquot[cnt] != NODQUOT)
				break;
		/* Only call ->drop if at least one dquot is attached */
		if (cnt < MAXQUOTAS)
			inode->i_sb->dq_op->drop(inode);
	}
}
/*
 * Following four functions update i_blocks+i_bytes fields and
 * quota information (together with appropriate checks)
 * NOTE: We absolutely rely on the fact that caller dirties
 * the inode (usually macros in quotaops.h care about this) and
 * holds a handle for the current transaction so that dquot write and
 * inode write go into the same transaction.
 */

/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
{
	int cnt, ret = NO_QUOTA;
	char warntype[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (IS_NOQUOTA(inode)) {
out_add:
		/* No quota accounting - just update the byte count */
		inode_add_bytes(inode, number);
		return QUOTA_OK;
	}
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warntype[cnt] = QUOTA_NL_NOWARN;

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	if (IS_NOQUOTA(inode)) {	/* Now we can do reliable test... */
		up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
		goto out_add;
	}
	spin_lock(&dq_data_lock);
	/* First pass: check all limits so we charge either all or none */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] == NODQUOT)
			continue;
		if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA)
			goto warn_put_all;
	}
	/* Second pass: all checks passed, account the space */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] == NODQUOT)
			continue;
		dquot_incr_space(inode->i_dquot[cnt], number);
	}
	inode_add_bytes(inode, number);
	ret = QUOTA_OK;
warn_put_all:
	spin_unlock(&dq_data_lock);
	if (ret == QUOTA_OK)
		/* Dirtify all the dquots - this can block when journalling */
		for (cnt = 0; cnt < MAXQUOTAS; cnt++)
			if (inode->i_dquot[cnt])
				mark_dquot_dirty(inode->i_dquot[cnt]);
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return ret;
}
/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(const struct inode *inode, qsize_t number)
{
	int cnt, ret = NO_QUOTA;
	char warntype[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (IS_NOQUOTA(inode))
		return QUOTA_OK;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warntype[cnt] = QUOTA_NL_NOWARN;
	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	/* Recheck reliably now that dqptr_sem is held */
	if (IS_NOQUOTA(inode)) {
		up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
		return QUOTA_OK;
	}
	spin_lock(&dq_data_lock);
	/* First pass: check all limits so we charge either all or none */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] == NODQUOT)
			continue;
		if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA)
			goto warn_put_all;
	}
	/* Second pass: all checks passed, account the inodes */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] == NODQUOT)
			continue;
		dquot_incr_inodes(inode->i_dquot[cnt], number);
	}
	ret = QUOTA_OK;
warn_put_all:
	spin_unlock(&dq_data_lock);
	if (ret == QUOTA_OK)
		/* Dirtify all the dquots - this can block when journalling */
		for (cnt = 0; cnt < MAXQUOTAS; cnt++)
			if (inode->i_dquot[cnt])
				mark_dquot_dirty(inode->i_dquot[cnt]);
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return ret;
}
/*
 * This operation can block, but only after everything is updated
 */
int dquot_free_space(struct inode *inode, qsize_t number)
{
	unsigned int cnt;
	char warntype[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (IS_NOQUOTA(inode)) {
out_sub:
		/* No quota accounting - just update the byte count */
		inode_sub_bytes(inode, number);
		return QUOTA_OK;
	}
	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	/* Now recheck reliably when holding dqptr_sem */
	if (IS_NOQUOTA(inode)) {
		up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
		goto out_sub;
	}
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] == NODQUOT)
			continue;
		/* Determine the "below limit" warning before decrementing */
		warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
		dquot_decr_space(inode->i_dquot[cnt], number);
	}
	inode_sub_bytes(inode, number);
	spin_unlock(&dq_data_lock);
	/* Dirtify all the dquots - this can block when journalling */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (inode->i_dquot[cnt])
			mark_dquot_dirty(inode->i_dquot[cnt]);
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return QUOTA_OK;
}
/*
 * This operation can block, but only after everything is updated
 */
int dquot_free_inode(const struct inode *inode, qsize_t number)
{
	unsigned int cnt;
	char warntype[MAXQUOTAS];

	/* First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex */
	if (IS_NOQUOTA(inode))
		return QUOTA_OK;
	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	/* Now recheck reliably when holding dqptr_sem */
	if (IS_NOQUOTA(inode)) {
		up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
		return QUOTA_OK;
	}
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] == NODQUOT)
			continue;
		/* Determine the "below limit" warning before decrementing */
		warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
		dquot_decr_inodes(inode->i_dquot[cnt], number);
	}
	spin_unlock(&dq_data_lock);
	/* Dirtify all the dquots - this can block when journalling */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (inode->i_dquot[cnt])
			mark_dquot_dirty(inode->i_dquot[cnt]);
	flush_warnings(inode->i_dquot, warntype);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	return QUOTA_OK;
}
  1320. /*
  1321. * Transfer the number of inode and blocks from one diskquota to an other.
  1322. *
  1323. * This operation can block, but only after everything is updated
  1324. * A transaction must be started when entering this function.
  1325. */
  1326. int dquot_transfer(struct inode *inode, struct iattr *iattr)
  1327. {
  1328. qsize_t space;
  1329. struct dquot *transfer_from[MAXQUOTAS];
  1330. struct dquot *transfer_to[MAXQUOTAS];
  1331. int cnt, ret = NO_QUOTA, chuid = (iattr->ia_valid & ATTR_UID) && inode->i_uid != iattr->ia_uid,
  1332. chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
  1333. char warntype_to[MAXQUOTAS];
  1334. char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
  1335. /* First test before acquiring mutex - solves deadlocks when we
  1336. * re-enter the quota code and are already holding the mutex */
  1337. if (IS_NOQUOTA(inode))
  1338. return QUOTA_OK;
  1339. /* Clear the arrays */
  1340. for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
  1341. transfer_to[cnt] = transfer_from[cnt] = NODQUOT;
  1342. warntype_to[cnt] = QUOTA_NL_NOWARN;
  1343. }
  1344. down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
  1345. /* Now recheck reliably when holding dqptr_sem */
  1346. if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
  1347. up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
  1348. return QUOTA_OK;
  1349. }
  1350. /* First build the transfer_to list - here we can block on
  1351. * reading/instantiating of dquots. We know that the transaction for
  1352. * us was already started so we don't violate lock ranking here */
  1353. for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
  1354. switch (cnt) {
  1355. case USRQUOTA:
  1356. if (!chuid)
  1357. continue;
  1358. transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt);
  1359. break;
  1360. case GRPQUOTA:
  1361. if (!chgid)
  1362. continue;
  1363. transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt);
  1364. break;
  1365. }
  1366. }
  1367. spin_lock(&dq_data_lock);
  1368. space = inode_get_bytes(inode);
  1369. /* Build the transfer_from list and check the limits */
  1370. for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
  1371. if (transfer_to[cnt] == NODQUOT)
  1372. continue;
  1373. transfer_from[cnt] = inode->i_dquot[cnt];
  1374. if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
  1375. NO_QUOTA || check_bdq(transfer_to[cnt], space, 0,
  1376. warntype_to + cnt) == NO_QUOTA)
  1377. goto warn_put_all;
  1378. }
  1379. /*
  1380. * Finally perform the needed transfer from transfer_from to transfer_to
  1381. */
  1382. for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
  1383. /*
  1384. * Skip changes for same uid or gid or for turned off quota-type.
  1385. */
  1386. if (transfer_to[cnt] == NODQUOT)
  1387. continue;
  1388. /* Due to IO error we might not have transfer_from[] structure */
  1389. if (transfer_from[cnt]) {
  1390. warntype_from_inodes[cnt] =
  1391. info_idq_free(transfer_from[cnt], 1);
  1392. warntype_from_space[cnt] =
  1393. info_bdq_free(transfer_from[cnt], space);
  1394. dquot_decr_inodes(transfer_from[cnt], 1);
  1395. dquot_decr_space(transfer_from[cnt], space);
  1396. }
  1397. dquot_incr_inodes(transfer_to[cnt], 1);
  1398. dquot_incr_space(transfer_to[cnt], space);
  1399. inode->i_dquot[cnt] = transfer_to[cnt];
  1400. }
  1401. ret = QUOTA_OK;
  1402. warn_put_all:
  1403. spin_unlock(&dq_data_lock);
  1404. /* Dirtify all the dquots - this can block when journalling */
  1405. for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
  1406. if (transfer_from[cnt])
  1407. mark_dquot_dirty(transfer_from[cnt]);
  1408. if (transfer_to[cnt])
  1409. mark_dquot_dirty(transfer_to[cnt]);
  1410. }
  1411. flush_warnings(transfer_to, warntype_to);
  1412. flush_warnings(transfer_from, warntype_from_inodes);
  1413. flush_warnings(transfer_from, warntype_from_space);
  1414. for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
  1415. if (ret == QUOTA_OK && transfer_from[cnt] != NODQUOT)
  1416. dqput(transfer_from[cnt]);
  1417. if (ret == NO_QUOTA && transfer_to[cnt] != NODQUOT)
  1418. dqput(transfer_to[cnt]);
  1419. }
  1420. up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
  1421. return ret;
  1422. }
  1423. /* Wrapper for transferring ownership of an inode */
  1424. int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
  1425. {
  1426. if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) {
  1427. vfs_dq_init(inode);
  1428. if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA)
  1429. return 1;
  1430. }
  1431. return 0;
  1432. }
  1433. /*
  1434. * Write info of quota file to disk
  1435. */
  1436. int dquot_commit_info(struct super_block *sb, int type)
  1437. {
  1438. int ret;
  1439. struct quota_info *dqopt = sb_dqopt(sb);
  1440. mutex_lock(&dqopt->dqio_mutex);
  1441. ret = dqopt->ops[type]->write_file_info(sb, type);
  1442. mutex_unlock(&dqopt->dqio_mutex);
  1443. return ret;
  1444. }
/*
 * Definitions of diskquota operations.
 * Generic implementations wired up here; filesystems using the VFS
 * quota code point their s_op at this table (individual entries are
 * presumably overridable by filesystems - verify against callers).
 */
struct dquot_operations dquot_operations = {
	.initialize	= dquot_initialize,
	.drop		= dquot_drop,
	.alloc_space	= dquot_alloc_space,
	.alloc_inode	= dquot_alloc_inode,
	.free_space	= dquot_free_space,
	.free_inode	= dquot_free_inode,
	.transfer	= dquot_transfer,
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
};
/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 *
 * 'flags' selects what is being turned off: DQUOT_USAGE_ENABLED and/or
 * DQUOT_LIMITS_ENABLED disable quotas, DQUOT_SUSPENDED suspends them while
 * keeping the quota inode referenced so a later remount RW can resume.
 * Returns 0 on success or a negative errno.
 */
int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
{
	int cnt, ret = 0;
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *toputinode[MAXQUOTAS];

	/* Cannot turn off usage accounting without turning off limits, or
	 * suspend quotas and simultaneously turn quotas off. */
	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
	    DQUOT_USAGE_ENABLED)))
		return -EINVAL;

	/* We need to serialize quota_off() for device */
	mutex_lock(&dqopt->dqonoff_mutex);

	/*
	 * Skip everything if there's nothing to do. We have to do this because
	 * sometimes we are called when fill_super() failed and calling
	 * sync_fs() in such cases does no good.
	 */
	if (!sb_any_quota_loaded(sb)) {
		mutex_unlock(&dqopt->dqonoff_mutex);
		return 0;
	}
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		toputinode[cnt] = NULL;
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_loaded(sb, cnt))
			continue;
		if (flags & DQUOT_SUSPENDED) {
			/* Mark suspended; usage/limits bits stay set so the
			 * quota inode is kept below for a later resume. */
			dqopt->flags |=
				dquot_state_flag(DQUOT_SUSPENDED, cnt);
		} else {
			dqopt->flags &= ~dquot_state_flag(flags, cnt);
			/* Turning off suspended quotas? */
			if (!sb_has_quota_loaded(sb, cnt) &&
			    sb_has_quota_suspended(sb, cnt)) {
				/* Only the kept inode reference remains; drop it. */
				dqopt->flags &= ~dquot_state_flag(
						DQUOT_SUSPENDED, cnt);
				iput(dqopt->files[cnt]);
				dqopt->files[cnt] = NULL;
				continue;
			}
		}
		/* We still have to keep quota loaded? */
		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
			continue;
		/* Note: these are blocking operations */
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we should be only
		 * users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);
		toputinode[cnt] = dqopt->files[cnt];
		/* When suspending, files[cnt] is kept so remount RW can resume. */
		if (!sb_has_quota_loaded(sb, cnt))
			dqopt->files[cnt] = NULL;
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}
	mutex_unlock(&dqopt->dqonoff_mutex);
	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;
	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (toputinode[cnt]) {
			mutex_lock(&dqopt->dqonoff_mutex);
			/* If quota was reenabled in the meantime, we have
			 * nothing to do */
			if (!sb_has_quota_loaded(sb, cnt)) {
				mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA);
				toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
				  S_NOATIME | S_NOQUOTA);
				truncate_inode_pages(&toputinode[cnt]->i_data, 0);
				mutex_unlock(&toputinode[cnt]->i_mutex);
				mark_inode_dirty(toputinode[cnt]);
			}
			mutex_unlock(&dqopt->dqonoff_mutex);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (toputinode[cnt]) {
			/* On remount RO, we keep the inode pointer so that we
			 * can reenable quota on the subsequent remount RW. We
			 * have to check 'flags' variable and not use sb_has_
			 * function because another quotaon / quotaoff could
			 * change global state before we got here. We refuse
			 * to suspend quotas when there is pending delete on
			 * the quota file... */
			if (!(flags & DQUOT_SUSPENDED))
				iput(toputinode[cnt]);
			else if (!toputinode[cnt]->i_nlink)
				ret = -EBUSY;
		}
	return ret;
}
  1581. int vfs_quota_off(struct super_block *sb, int type, int remount)
  1582. {
  1583. return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED :
  1584. (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED));
  1585. }
/*
 * Turn quotas on for a device
 */
/*
 * Helper function to turn quotas on when we already have the inode of
 * quota file and no quota information is loaded.
 *
 * Takes an extra reference to 'inode' on success (stored in dqopt->files).
 * Returns 0 on success or a negative errno; on failure the inode flags
 * changed here are restored.
 */
static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
	unsigned int flags)
{
	struct quota_format_type *fmt = find_quota_format(format_id);
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;
	int oldflags = -1;	/* -1 == inode flags were never modified */

	if (!fmt)
		return -ESRCH;
	/* Quota file must be a regular, writable file on this filesystem. */
	if (!S_ISREG(inode->i_mode)) {
		error = -EACCES;
		goto out_fmt;
	}
	if (IS_RDONLY(inode)) {
		error = -EROFS;
		goto out_fmt;
	}
	if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush the inode so
		 * that we see all the changes from userspace... */
		write_inode_now(inode, 1);
		/* And now flush the block cache so that kernel sees the
		 * changes */
		invalidate_bdev(sb->s_bdev);
	}
	mutex_lock(&inode->i_mutex);
	mutex_lock(&dqopt->dqonoff_mutex);
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_lock;
	}
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible) Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		down_write(&dqopt->dqptr_sem);
		oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA);
		inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
		up_write(&dqopt->dqptr_sem);
		/* Release any dquots the quota inode itself holds. */
		sb->dq_op->drop(inode);
	}
	error = -EIO;
	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		goto out_lock;
	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_file_init;
	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	mutex_lock(&dqopt->dqio_mutex);
	if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
		mutex_unlock(&dqopt->dqio_mutex);
		goto out_file_init;
	}
	mutex_unlock(&dqopt->dqio_mutex);
	mutex_unlock(&inode->i_mutex);
	dqopt->flags |= dquot_state_flag(flags, type);
	/* NOTE(review): presumably attaches dquots to inodes already in use
	 * on this sb - add_dquot_ref() is defined elsewhere; confirm there. */
	add_dquot_ref(sb, type);
	mutex_unlock(&dqopt->dqonoff_mutex);
	return 0;
out_file_init:
	dqopt->files[type] = NULL;
	iput(inode);
out_lock:
	mutex_unlock(&dqopt->dqonoff_mutex);
	if (oldflags != -1) {
		down_write(&dqopt->dqptr_sem);
		/* Set the flags back (in the case of accidental quotaon()
		 * on a wrong file we don't want to mess up the flags) */
		inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
		inode->i_flags |= oldflags;
		up_write(&dqopt->dqptr_sem);
	}
	mutex_unlock(&inode->i_mutex);
out_fmt:
	put_quota_format(fmt);
	return error;
}
/* Reenable quotas on remount RW */
static int vfs_quota_on_remount(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode;
	int ret;
	unsigned int flags;

	mutex_lock(&dqopt->dqonoff_mutex);
	/* Nothing to resume if this quota type was never suspended. */
	if (!sb_has_quota_suspended(sb, type)) {
		mutex_unlock(&dqopt->dqonoff_mutex);
		return 0;
	}
	/* Take over the inode reference kept by vfs_quota_disable(). */
	inode = dqopt->files[type];
	dqopt->files[type] = NULL;
	/* Remember which of usage/limits was enabled before suspension so
	 * exactly the same set is re-enabled below. */
	flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
			DQUOT_LIMITS_ENABLED, type);
	dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type);
	mutex_unlock(&dqopt->dqonoff_mutex);

	flags = dquot_generic_flag(flags, type);
	ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id,
			flags);
	/* Drop the reference we took over; load took its own on success. */
	iput(inode);

	return ret;
}
  1708. int vfs_quota_on_path(struct super_block *sb, int type, int format_id,
  1709. struct path *path)
  1710. {
  1711. int error = security_quota_on(path->dentry);
  1712. if (error)
  1713. return error;
  1714. /* Quota file not on the same filesystem? */
  1715. if (path->mnt->mnt_sb != sb)
  1716. error = -EXDEV;
  1717. else
  1718. error = vfs_load_quota_inode(path->dentry->d_inode, type,
  1719. format_id, DQUOT_USAGE_ENABLED |
  1720. DQUOT_LIMITS_ENABLED);
  1721. return error;
  1722. }
  1723. int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
  1724. int remount)
  1725. {
  1726. struct path path;
  1727. int error;
  1728. if (remount)
  1729. return vfs_quota_on_remount(sb, type);
  1730. error = kern_path(name, LOOKUP_FOLLOW, &path);
  1731. if (!error) {
  1732. error = vfs_quota_on_path(sb, type, format_id, &path);
  1733. path_put(&path);
  1734. }
  1735. return error;
  1736. }
/*
 * More powerful function for turning on quotas allowing setting
 * of individual quota flags
 */
int vfs_quota_enable(struct inode *inode, int type, int format_id,
	unsigned int flags)
{
	int ret = 0;
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);

	/* Just unsuspend quotas? */
	if (flags & DQUOT_SUSPENDED)
		return vfs_quota_on_remount(sb, type);
	if (!flags)
		return 0;
	/* Just updating flags needed? */
	if (sb_has_quota_loaded(sb, type)) {
		mutex_lock(&dqopt->dqonoff_mutex);
		/* Now do a reliable test... (the check above ran without the
		 * mutex and could have raced with a concurrent quotaoff) */
		if (!sb_has_quota_loaded(sb, type)) {
			mutex_unlock(&dqopt->dqonoff_mutex);
			goto load_quota;
		}
		/* Enabling a state that is already enabled is an error. */
		if (flags & DQUOT_USAGE_ENABLED &&
		    sb_has_quota_usage_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_lock;
		}
		if (flags & DQUOT_LIMITS_ENABLED &&
		    sb_has_quota_limits_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_lock;
		}
		sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
out_lock:
		mutex_unlock(&dqopt->dqonoff_mutex);
		return ret;
	}

load_quota:
	return vfs_load_quota_inode(inode, type, format_id, flags);
}
  1778. /*
  1779. * This function is used when filesystem needs to initialize quotas
  1780. * during mount time.
  1781. */
  1782. int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
  1783. int format_id, int type)
  1784. {
  1785. struct dentry *dentry;
  1786. int error;
  1787. dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
  1788. if (IS_ERR(dentry))
  1789. return PTR_ERR(dentry);
  1790. if (!dentry->d_inode) {
  1791. error = -ENOENT;
  1792. goto out;
  1793. }
  1794. error = security_quota_on(dentry);
  1795. if (!error)
  1796. error = vfs_load_quota_inode(dentry->d_inode, type, format_id,
  1797. DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
  1798. out:
  1799. dput(dentry);
  1800. return error;
  1801. }
  1802. /* Wrapper to turn on quotas when remounting rw */
  1803. int vfs_dq_quota_on_remount(struct super_block *sb)
  1804. {
  1805. int cnt;
  1806. int ret = 0, err;
  1807. if (!sb->s_qcop || !sb->s_qcop->quota_on)
  1808. return -ENOSYS;
  1809. for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
  1810. err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1);
  1811. if (err < 0 && !ret)
  1812. ret = err;
  1813. }
  1814. return ret;
  1815. }
/* Convert quota blocks to bytes (one quota block is 1 << QIF_DQBLKSIZE_BITS bytes). */
static inline qsize_t qbtos(qsize_t blocks)
{
	return blocks << QIF_DQBLKSIZE_BITS;
}
/* Convert bytes to quota blocks, rounding any partial block up. */
static inline qsize_t stoqb(qsize_t space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}
  1824. /* Generic routine for getting common part of quota structure */
  1825. static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
  1826. {
  1827. struct mem_dqblk *dm = &dquot->dq_dqb;
  1828. spin_lock(&dq_data_lock);
  1829. di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
  1830. di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
  1831. di->dqb_curspace = dm->dqb_curspace;
  1832. di->dqb_ihardlimit = dm->dqb_ihardlimit;
  1833. di->dqb_isoftlimit = dm->dqb_isoftlimit;
  1834. di->dqb_curinodes = dm->dqb_curinodes;
  1835. di->dqb_btime = dm->dqb_btime;
  1836. di->dqb_itime = dm->dqb_itime;
  1837. di->dqb_valid = QIF_ALL;
  1838. spin_unlock(&dq_data_lock);
  1839. }
  1840. int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
  1841. {
  1842. struct dquot *dquot;
  1843. mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
  1844. if (!(dquot = dqget(sb, id, type))) {
  1845. mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
  1846. return -ESRCH;
  1847. }
  1848. do_get_dqblk(dquot, di);
  1849. dqput(dquot);
  1850. mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
  1851. return 0;
  1852. }
/* Generic routine for setting common part of quota structure
 *
 * Each QIF_* bit in di->dqb_valid selects one group of fields to copy in;
 * the corresponding DQ_LASTSET_B+* flag records that userspace set those
 * fields explicitly. Returns 0 or -ERANGE if limits exceed what the quota
 * format can store.
 */
static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];

	/* Reject limits above the format's maxima before touching anything. */
	if ((di->dqb_valid & QIF_BLIMITS &&
	    (di->dqb_bhardlimit > dqi->dqi_maxblimit ||
	     di->dqb_bsoftlimit > dqi->dqi_maxblimit)) ||
	    (di->dqb_valid & QIF_ILIMITS &&
	    (di->dqb_ihardlimit > dqi->dqi_maxilimit ||
	     di->dqb_isoftlimit > dqi->dqi_maxilimit)))
		return -ERANGE;

	spin_lock(&dq_data_lock);
	if (di->dqb_valid & QIF_SPACE) {
		dm->dqb_curspace = di->dqb_curspace;
		check_blim = 1;
		__set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_BLIMITS) {
		/* Userspace supplies block limits in quota blocks; store bytes. */
		dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
		dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
		check_blim = 1;
		__set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_INODES) {
		dm->dqb_curinodes = di->dqb_curinodes;
		check_ilim = 1;
		__set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_ILIMITS) {
		dm->dqb_isoftlimit = di->dqb_isoftlimit;
		dm->dqb_ihardlimit = di->dqb_ihardlimit;
		check_ilim = 1;
		__set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_BTIME) {
		dm->dqb_btime = di->dqb_btime;
		__set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_ITIME) {
		dm->dqb_itime = di->dqb_itime;
		__set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}
	/* Recompute the block grace deadline when usage or block limits changed. */
	if (check_blim) {
		if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		}
		else if (!(di->dqb_valid & QIF_BTIME))	/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
	}
	/* Likewise for the inode grace deadline. */
	if (check_ilim) {
		if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		}
		else if (!(di->dqb_valid & QIF_ITIME))	/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
	}
	/* With no limits set at all the dquot is marked fake. */
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dq_data_lock);
	mark_dquot_dirty(dquot);

	return 0;
}
  1921. int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
  1922. {
  1923. struct dquot *dquot;
  1924. int rc;
  1925. mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
  1926. dquot = dqget(sb, id, type);
  1927. if (!dquot) {
  1928. rc = -ESRCH;
  1929. goto out;
  1930. }
  1931. rc = do_set_dqblk(dquot, di);
  1932. dqput(dquot);
  1933. out:
  1934. mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
  1935. return rc;
  1936. }
  1937. /* Generic routine for getting common part of quota file information */
  1938. int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
  1939. {
  1940. struct mem_dqinfo *mi;
  1941. mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
  1942. if (!sb_has_quota_active(sb, type)) {
  1943. mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
  1944. return -ESRCH;
  1945. }
  1946. mi = sb_dqopt(sb)->info + type;
  1947. spin_lock(&dq_data_lock);
  1948. ii->dqi_bgrace = mi->dqi_bgrace;
  1949. ii->dqi_igrace = mi->dqi_igrace;
  1950. ii->dqi_flags = mi->dqi_flags & DQF_MASK;
  1951. ii->dqi_valid = IIF_ALL;
  1952. spin_unlock(&dq_data_lock);
  1953. mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
  1954. return 0;
  1955. }
  1956. /* Generic routine for setting common part of quota file information */
  1957. int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
  1958. {
  1959. struct mem_dqinfo *mi;
  1960. int err = 0;
  1961. mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
  1962. if (!sb_has_quota_active(sb, type)) {
  1963. err = -ESRCH;
  1964. goto out;
  1965. }
  1966. mi = sb_dqopt(sb)->info + type;
  1967. spin_lock(&dq_data_lock);
  1968. if (ii->dqi_valid & IIF_BGRACE)
  1969. mi->dqi_bgrace = ii->dqi_bgrace;
  1970. if (ii->dqi_valid & IIF_IGRACE)
  1971. mi->dqi_igrace = ii->dqi_igrace;
  1972. if (ii->dqi_valid & IIF_FLAGS)
  1973. mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK);
  1974. spin_unlock(&dq_data_lock);
  1975. mark_info_dirty(sb, type);
  1976. /* Force write to disk */
  1977. sb->dq_op->write_info(sb, type);
  1978. out:
  1979. mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
  1980. return err;
  1981. }
/* quotactl(2) operations implemented by the generic VFS quota code. */
struct quotactl_ops vfs_quotactl_ops = {
	.quota_on = vfs_quota_on,
	.quota_off = vfs_quota_off,
	.quota_sync = vfs_quota_sync,
	.get_info = vfs_get_dqinfo,
	.set_info = vfs_set_dqinfo,
	.get_dqblk = vfs_get_dqblk,
	.set_dqblk = vfs_set_dqblk
};
/* Sysctl entries under /proc/sys/fs/quota/: read-only counters from
 * dqstats plus (optionally) the quota-warning toggle. */
static ctl_table fs_dqstats_table[] = {
	{
		.ctl_name = FS_DQ_LOOKUPS,
		.procname = "lookups",
		.data = &dqstats.lookups,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_DROPS,
		.procname = "drops",
		.data = &dqstats.drops,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_READS,
		.procname = "reads",
		.data = &dqstats.reads,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_WRITES,
		.procname = "writes",
		.data = &dqstats.writes,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_CACHE_HITS,
		.procname = "cache_hits",
		.data = &dqstats.cache_hits,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_ALLOCATED,
		.procname = "allocated_dquots",
		.data = &dqstats.allocated_dquots,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_FREE,
		.procname = "free_dquots",
		.data = &dqstats.free_dquots,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_SYNCS,
		.procname = "syncs",
		.data = &dqstats.syncs,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	/* Writable toggle: print quota warnings to the console. */
	{
		.ctl_name = FS_DQ_WARNINGS,
		.procname = "warnings",
		.data = &flag_print_warnings,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = &proc_dointvec,
	},
#endif
	{ .ctl_name = 0 },
};
/* /proc/sys/fs/quota directory holding the statistics table above. */
static ctl_table fs_table[] = {
	{
		.ctl_name = FS_DQSTATS,
		.procname = "quota",
		.mode = 0555,
		.child = fs_dqstats_table,
	},
	{ .ctl_name = 0 },
};
/* Root of the sysctl subtree registered by dquot_init(): /proc/sys/fs. */
static ctl_table sys_table[] = {
	{
		.ctl_name = CTL_FS,
		.procname = "fs",
		.mode = 0555,
		.child = fs_table,
	},
	{ .ctl_name = 0 },
};
/* One-time initialization of the quota subsystem: sysctls, dquot slab
 * cache, the dquot hash table, the cache shrinker and (optionally) the
 * quota netlink interface. */
static int __init dquot_init(void)
{
	int i;
	unsigned long nr_hash, order;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_table(sys_table);

	/* SLAB_PANIC: no error handling needed, allocation failure panics. */
	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			NULL);

	/* One page (order 0) worth of hash heads. */
	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	/* Compute floor(log2(nr_hash)): bump dq_hash_bits until the shift
	 * clears all bits, then back off by one. */
	dq_hash_bits = 0;
	do {
		dq_hash_bits++;
	} while (nr_hash >> dq_hash_bits);
	dq_hash_bits--;

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
			nr_hash, order, (PAGE_SIZE << order));

	register_shrinker(&dqcache_shrinker);

#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
	/* Netlink registration failure is non-fatal; warnings just won't
	 * be delivered over netlink. */
	if (genl_register_family(&quota_genl_family) != 0)
		printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n");
#endif

	return 0;
}
module_init(dquot_init);

/* Symbols exported for filesystems implementing quota support on top of
 * the generic dquot layer (format registration, on/off control, dquot
 * lifetime and space/inode accounting hooks). */
EXPORT_SYMBOL(register_quota_format);
EXPORT_SYMBOL(unregister_quota_format);
EXPORT_SYMBOL(dqstats);
EXPORT_SYMBOL(dq_data_lock);
EXPORT_SYMBOL(vfs_quota_enable);
EXPORT_SYMBOL(vfs_quota_on);
EXPORT_SYMBOL(vfs_quota_on_path);
EXPORT_SYMBOL(vfs_quota_on_mount);
EXPORT_SYMBOL(vfs_quota_disable);
EXPORT_SYMBOL(vfs_quota_off);
EXPORT_SYMBOL(dquot_scan_active);
EXPORT_SYMBOL(vfs_quota_sync);
EXPORT_SYMBOL(vfs_get_dqinfo);
EXPORT_SYMBOL(vfs_set_dqinfo);
EXPORT_SYMBOL(vfs_get_dqblk);
EXPORT_SYMBOL(vfs_set_dqblk);
EXPORT_SYMBOL(dquot_commit);
EXPORT_SYMBOL(dquot_commit_info);
EXPORT_SYMBOL(dquot_acquire);
EXPORT_SYMBOL(dquot_release);
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
EXPORT_SYMBOL(dquot_initialize);
EXPORT_SYMBOL(dquot_drop);
EXPORT_SYMBOL(dquot_drop_locked);
EXPORT_SYMBOL(vfs_dq_drop);
EXPORT_SYMBOL(dqget);
EXPORT_SYMBOL(dqput);
EXPORT_SYMBOL(dquot_is_cached);
EXPORT_SYMBOL(dquot_alloc_space);
EXPORT_SYMBOL(dquot_alloc_inode);
EXPORT_SYMBOL(dquot_free_space);
EXPORT_SYMBOL(dquot_free_inode);
EXPORT_SYMBOL(dquot_transfer);
EXPORT_SYMBOL(vfs_dq_transfer);
EXPORT_SYMBOL(vfs_dq_quota_on_remount);