xfs_inode.c

/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_shared.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_attr.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define XFS_ITRUNC_MAX_EXTENTS  2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);

/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
        struct xfs_inode        *ip)
{
        if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
                return ip->i_d.di_extsize;
        if (XFS_IS_REALTIME_INODE(ip))
                return ip->i_mount->m_sb.sb_rextsize;
        return 0;
}
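
/*
 * Illustrative sketch, not part of the original file: callers typically
 * feed the returned hint into allocation alignment, roughly:
 *
 *	xfs_extlen_t	extsz = xfs_get_extsz_hint(ip);
 *
 *	if (extsz) {
 *		offset_fsb = rounddown(offset_fsb, extsz);
 *		end_fsb = roundup(end_fsb, extsz);
 *	}
 *
 * where offset_fsb/end_fsb are hypothetical filesystem-block bounds of
 * the allocation being set up.
 */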
/*
 * This is a wrapper routine around the xfs_ilock() routine used to centralize
 * some grungy code.  It is used in places that wish to lock the inode solely
 * for reading the extents.  The reason these places can't just call
 * xfs_ilock(SHARED) is that the inode lock also guards the reading in of the
 * extents from disk for a file in b-tree format.  If the inode is in b-tree
 * format, then we need to lock the inode exclusively until the extents are read
 * in.  Locking it exclusively all the time would limit our parallelism
 * unnecessarily, though.  What we do instead is check to see if the extents
 * have been read in yet, and only lock the inode exclusively if they have not.
 *
 * The function returns a value which should be given to the corresponding
 * xfs_iunlock_map_shared().  This value is the mode in which the lock was
 * actually taken.
 */
uint
xfs_ilock_map_shared(
        xfs_inode_t     *ip)
{
        uint    lock_mode;

        if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
            ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
                lock_mode = XFS_ILOCK_EXCL;
        } else {
                lock_mode = XFS_ILOCK_SHARED;
        }

        xfs_ilock(ip, lock_mode);

        return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
        xfs_inode_t     *ip,
        unsigned int    lock_mode)
{
        xfs_iunlock(ip, lock_mode);
}
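
/*
 * Illustrative usage sketch (not in the original source): the mode
 * returned by xfs_ilock_map_shared() must be passed back unchanged,
 * since the lock may have been taken exclusively:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	... read the extent list (e.g. via xfs_bmapi_read()) ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */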
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
        xfs_inode_t     *ip,
        uint            lock_flags)
{
        trace_xfs_ilock(ip, lock_flags, _RET_IP_);

        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

        if (lock_flags & XFS_IOLOCK_EXCL)
                mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
        else if (lock_flags & XFS_IOLOCK_SHARED)
                mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

        if (lock_flags & XFS_ILOCK_EXCL)
                mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
        else if (lock_flags & XFS_ILOCK_SHARED)
                mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(
        xfs_inode_t     *ip,
        uint            lock_flags)
{
        trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

        if (lock_flags & XFS_IOLOCK_EXCL) {
                if (!mrtryupdate(&ip->i_iolock))
                        goto out;
        } else if (lock_flags & XFS_IOLOCK_SHARED) {
                if (!mrtryaccess(&ip->i_iolock))
                        goto out;
        }
        if (lock_flags & XFS_ILOCK_EXCL) {
                if (!mrtryupdate(&ip->i_lock))
                        goto out_undo_iolock;
        } else if (lock_flags & XFS_ILOCK_SHARED) {
                if (!mrtryaccess(&ip->i_lock))
                        goto out_undo_iolock;
        }
        return 1;

out_undo_iolock:
        if (lock_flags & XFS_IOLOCK_EXCL)
                mrunlock_excl(&ip->i_iolock);
        else if (lock_flags & XFS_IOLOCK_SHARED)
                mrunlock_shared(&ip->i_iolock);
out:
        return 0;
}
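
/*
 * Illustrative sketch (not in the original source): a typical trylock
 * pattern that falls back to blocking when the inode is busy:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 *		... do other work, or simply block: ...
 *		xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	}
 *	... modify the inode ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */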
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 *
 */
void
xfs_iunlock(
        xfs_inode_t     *ip,
        uint            lock_flags)
{
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
        ASSERT(lock_flags != 0);

        if (lock_flags & XFS_IOLOCK_EXCL)
                mrunlock_excl(&ip->i_iolock);
        else if (lock_flags & XFS_IOLOCK_SHARED)
                mrunlock_shared(&ip->i_iolock);

        if (lock_flags & XFS_ILOCK_EXCL)
                mrunlock_excl(&ip->i_lock);
        else if (lock_flags & XFS_ILOCK_SHARED)
                mrunlock_shared(&ip->i_lock);

        trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
        xfs_inode_t     *ip,
        uint            lock_flags)
{
        ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

        if (lock_flags & XFS_ILOCK_EXCL)
                mrdemote(&ip->i_lock);
        if (lock_flags & XFS_IOLOCK_EXCL)
                mrdemote(&ip->i_iolock);

        trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
                if (!(lock_flags & XFS_ILOCK_SHARED))
                        return !!ip->i_lock.mr_writer;
                return rwsem_is_locked(&ip->i_lock.mr_lock);
        }

        if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
                if (!(lock_flags & XFS_IOLOCK_SHARED))
                        return !!ip->i_iolock.mr_writer;
                return rwsem_is_locked(&ip->i_iolock.mr_lock);
        }

        ASSERT(0);
        return 0;
}
#endif

#ifdef DEBUG
int xfs_locked_n;
int xfs_small_retries;
int xfs_middle_retries;
int xfs_lots_retries;
int xfs_lock_delays;
#endif
/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with
 * a different value
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
        if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
                lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
        if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
                lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;

        return lock_mode;
}
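
/*
 * Illustrative note (not in the original source): the subclass lands in
 * the lockdep dependency bits of the flags, so for example
 *
 *	xfs_lock_inumorder(XFS_ILOCK_EXCL, 1)
 *
 * yields XFS_ILOCK_EXCL with subclass (1 + XFS_LOCK_INUMORDER) encoded
 * above XFS_ILOCK_SHIFT, letting lockdep tell the first and second
 * inode's i_lock apart when several are held at once.
 */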
/*
 * The following routine will lock n inodes in exclusive mode.
 * We assume the caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock
 * is in the AIL and we start waiting for another inode that is locked
 * by a thread in a long running transaction (such as truncate). This can
 * result in deadlock since the long running trans might need to wait
 * for the inode we just locked in order to push the tail and free space
 * in the log.
 */
void
xfs_lock_inodes(
        xfs_inode_t     **ips,
        int             inodes,
        uint            lock_mode)
{
        int             attempts = 0, i, j, try_lock;
        xfs_log_item_t  *lp;

        ASSERT(ips && (inodes >= 2)); /* we need at least two */

        try_lock = 0;
        i = 0;

again:
        for (; i < inodes; i++) {
                ASSERT(ips[i]);

                if (i && (ips[i] == ips[i-1]))  /* Already locked */
                        continue;

                /*
                 * If try_lock is not set yet, make sure all locked inodes
                 * are not in the AIL.
                 * If any are, set try_lock to be used later.
                 */
                if (!try_lock) {
                        for (j = (i - 1); j >= 0 && !try_lock; j--) {
                                lp = (xfs_log_item_t *)ips[j]->i_itemp;
                                if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
                                        try_lock++;
                                }
                        }
                }

                /*
                 * If any of the previous locks we have locked is in the AIL,
                 * we must TRY to get the second and subsequent locks. If
                 * we can't get any, we must release all we have
                 * and try again.
                 */
                if (try_lock) {
                        /* try_lock must be 0 if i is 0. */
                        /*
                         * try_lock means we have an inode locked
                         * that is in the AIL.
                         */
                        ASSERT(i != 0);
                        if (!xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i))) {
                                attempts++;

                                /*
                                 * Unlock all previous guys and try again.
                                 * xfs_iunlock will try to push the tail
                                 * if the inode is in the AIL.
                                 */
                                for (j = i - 1; j >= 0; j--) {
                                        /*
                                         * Check to see if we've already
                                         * unlocked this one.
                                         * Not the first one going back,
                                         * and the inode ptr is the same.
                                         */
                                        if ((j != (i - 1)) && ips[j] ==
                                                                ips[j+1])
                                                continue;

                                        xfs_iunlock(ips[j], lock_mode);
                                }

                                if ((attempts % 5) == 0) {
                                        delay(1); /* Don't just spin the CPU */
#ifdef DEBUG
                                        xfs_lock_delays++;
#endif
                                }
                                i = 0;
                                try_lock = 0;
                                goto again;
                        }
                } else {
                        xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
                }
        }

#ifdef DEBUG
        if (attempts) {
                if (attempts < 5) xfs_small_retries++;
                else if (attempts < 100) xfs_middle_retries++;
                else xfs_lots_retries++;
        } else {
                xfs_locked_n++;
        }
#endif
}
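
/*
 * Illustrative usage sketch (not in the original source): callers sort
 * the inode pointers by i_ino first, then lock the group, e.g.:
 *
 *	xfs_inode_t	*ips[2] = { dp, ip };	(already in i_ino order)
 *
 *	xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
 *	... joint update ...
 *	xfs_iunlock(dp, XFS_ILOCK_EXCL);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */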
/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock
 * at a time - the iolock or the ilock, but not both at once. If
 * we lock both at once, lockdep will report false positives saying
 * we have violated locking orders.
 */
void
xfs_lock_two_inodes(
        xfs_inode_t             *ip0,
        xfs_inode_t             *ip1,
        uint                    lock_mode)
{
        xfs_inode_t             *temp;
        int                     attempts = 0;
        xfs_log_item_t          *lp;

        if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
                ASSERT((lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) == 0);
        ASSERT(ip0->i_ino != ip1->i_ino);

        if (ip0->i_ino > ip1->i_ino) {
                temp = ip0;
                ip0 = ip1;
                ip1 = temp;
        }

again:
        xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));

        /*
         * If the first lock we have locked is in the AIL, we must TRY to get
         * the second lock. If we can't get it, we must release the first one
         * and try again.
         */
        lp = (xfs_log_item_t *)ip0->i_itemp;
        if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
                if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
                        xfs_iunlock(ip0, lock_mode);
                        if ((++attempts % 5) == 0)
                                delay(1); /* Don't just spin the CPU */
                        goto again;
                }
        } else {
                xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
        }
}
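
/*
 * Illustrative usage sketch (not in the original source): xfs_link()
 * below uses this helper to take both inode locks in a deadlock-safe
 * order before joining the inodes to a transaction:
 *
 *	xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
 */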
void
__xfs_iflock(
        struct xfs_inode        *ip)
{
        wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
        DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

        do {
                prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
                if (xfs_isiflocked(ip))
                        io_schedule();
        } while (!xfs_iflock_nowait(ip));

        finish_wait(wq, &wait.wait);
}

STATIC uint
_xfs_dic2xflags(
        __uint16_t              di_flags)
{
        uint                    flags = 0;

        if (di_flags & XFS_DIFLAG_ANY) {
                if (di_flags & XFS_DIFLAG_REALTIME)
                        flags |= XFS_XFLAG_REALTIME;
                if (di_flags & XFS_DIFLAG_PREALLOC)
                        flags |= XFS_XFLAG_PREALLOC;
                if (di_flags & XFS_DIFLAG_IMMUTABLE)
                        flags |= XFS_XFLAG_IMMUTABLE;
                if (di_flags & XFS_DIFLAG_APPEND)
                        flags |= XFS_XFLAG_APPEND;
                if (di_flags & XFS_DIFLAG_SYNC)
                        flags |= XFS_XFLAG_SYNC;
                if (di_flags & XFS_DIFLAG_NOATIME)
                        flags |= XFS_XFLAG_NOATIME;
                if (di_flags & XFS_DIFLAG_NODUMP)
                        flags |= XFS_XFLAG_NODUMP;
                if (di_flags & XFS_DIFLAG_RTINHERIT)
                        flags |= XFS_XFLAG_RTINHERIT;
                if (di_flags & XFS_DIFLAG_PROJINHERIT)
                        flags |= XFS_XFLAG_PROJINHERIT;
                if (di_flags & XFS_DIFLAG_NOSYMLINKS)
                        flags |= XFS_XFLAG_NOSYMLINKS;
                if (di_flags & XFS_DIFLAG_EXTSIZE)
                        flags |= XFS_XFLAG_EXTSIZE;
                if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
                        flags |= XFS_XFLAG_EXTSZINHERIT;
                if (di_flags & XFS_DIFLAG_NODEFRAG)
                        flags |= XFS_XFLAG_NODEFRAG;
                if (di_flags & XFS_DIFLAG_FILESTREAM)
                        flags |= XFS_XFLAG_FILESTREAM;
        }

        return flags;
}

uint
xfs_ip2xflags(
        xfs_inode_t             *ip)
{
        xfs_icdinode_t          *dic = &ip->i_d;

        return _xfs_dic2xflags(dic->di_flags) |
                                (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
        xfs_dinode_t            *dip)
{
        return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
                                (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}
/*
 * Looks up an inode from "name".  If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match.  If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
        xfs_inode_t             *dp,
        struct xfs_name         *name,
        xfs_inode_t             **ipp,
        struct xfs_name         *ci_name)
{
        xfs_ino_t               inum;
        int                     error;
        uint                    lock_mode;

        trace_xfs_lookup(dp, name);

        if (XFS_FORCED_SHUTDOWN(dp->i_mount))
                return XFS_ERROR(EIO);

        lock_mode = xfs_ilock_map_shared(dp);
        error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
        xfs_iunlock_map_shared(dp, lock_mode);

        if (error)
                goto out;

        error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
        if (error)
                goto out_free_name;

        return 0;

out_free_name:
        if (ci_name)
                kmem_free(ci_name->name);
out:
        *ipp = NULL;
        return error;
}
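
/*
 * Illustrative usage sketch (not in the original source): a caller doing
 * a case-insensitive lookup owns any CI name handed back and must free
 * it once consumed, roughly:
 *
 *	struct xfs_name	ci_name;
 *
 *	error = xfs_lookup(dp, &xname, &ip, &ci_name);
 *	if (!error && ci_name.name)
 *		kmem_free(ci_name.name);	(after using the CI match)
 */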
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
        xfs_trans_t     *tp,
        xfs_inode_t     *pip,
        umode_t         mode,
        xfs_nlink_t     nlink,
        xfs_dev_t       rdev,
        prid_t          prid,
        int             okalloc,
        xfs_buf_t       **ialloc_context,
        xfs_inode_t     **ipp)
{
        struct xfs_mount *mp = tp->t_mountp;
        xfs_ino_t       ino;
        xfs_inode_t     *ip;
        uint            flags;
        int             error;
        timespec_t      tv;
        int             filestreams = 0;

        /*
         * Call the space management code to pick
         * the on-disk inode to be allocated.
         */
        error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
                            ialloc_context, &ino);
        if (error)
                return error;
        if (*ialloc_context || ino == NULLFSINO) {
                *ipp = NULL;
                return 0;
        }
        ASSERT(*ialloc_context == NULL);

        /*
         * Get the in-core inode with the lock held exclusively.
         * This is because we're setting fields here we need
         * to prevent others from looking at until we're done.
         */
        error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
                         XFS_ILOCK_EXCL, &ip);
        if (error)
                return error;
        ASSERT(ip != NULL);

        ip->i_d.di_mode = mode;
        ip->i_d.di_onlink = 0;
        ip->i_d.di_nlink = nlink;
        ASSERT(ip->i_d.di_nlink == nlink);
        ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
        ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
        xfs_set_projid(ip, prid);
        memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

        /*
         * If the superblock version is up to where we support new format
         * inodes and this is currently an old format inode, then change
         * the inode version number now.  This way we only do the conversion
         * here rather than here and in the flush/logging code.
         */
        if (xfs_sb_version_hasnlink(&mp->m_sb) &&
            ip->i_d.di_version == 1) {
                ip->i_d.di_version = 2;
                /*
                 * We've already zeroed the old link count, the projid field,
                 * and the pad field.
                 */
        }

        /*
         * Project ids won't be stored on disk if we are using a version 1 inode.
         */
        if ((prid != 0) && (ip->i_d.di_version == 1))
                xfs_bump_ino_vers2(tp, ip);

        if (pip && XFS_INHERIT_GID(pip)) {
                ip->i_d.di_gid = pip->i_d.di_gid;
                if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
                        ip->i_d.di_mode |= S_ISGID;
                }
        }

        /*
         * If the group ID of the new file does not match the effective group
         * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
         * (and only if the irix_sgid_inherit compatibility variable is set).
         */
        if ((irix_sgid_inherit) &&
            (ip->i_d.di_mode & S_ISGID) &&
            (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid)))) {
                ip->i_d.di_mode &= ~S_ISGID;
        }

        ip->i_d.di_size = 0;
        ip->i_d.di_nextents = 0;
        ASSERT(ip->i_d.di_nblocks == 0);

        nanotime(&tv);
        ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
        ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
        ip->i_d.di_atime = ip->i_d.di_mtime;
        ip->i_d.di_ctime = ip->i_d.di_mtime;

        /*
         * di_gen will have been taken care of in xfs_iread.
         */
        ip->i_d.di_extsize = 0;
        ip->i_d.di_dmevmask = 0;
        ip->i_d.di_dmstate = 0;
        ip->i_d.di_flags = 0;

        if (ip->i_d.di_version == 3) {
                ASSERT(ip->i_d.di_ino == ino);
                ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid));
                ip->i_d.di_crc = 0;
                ip->i_d.di_changecount = 1;
                ip->i_d.di_lsn = 0;
                ip->i_d.di_flags2 = 0;
                memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2));
                ip->i_d.di_crtime = ip->i_d.di_mtime;
        }

        flags = XFS_ILOG_CORE;
        switch (mode & S_IFMT) {
        case S_IFIFO:
        case S_IFCHR:
        case S_IFBLK:
        case S_IFSOCK:
                ip->i_d.di_format = XFS_DINODE_FMT_DEV;
                ip->i_df.if_u2.if_rdev = rdev;
                ip->i_df.if_flags = 0;
                flags |= XFS_ILOG_DEV;
                break;
        case S_IFREG:
                /*
                 * we can't set up filestreams until after the VFS inode
                 * is set up properly.
                 */
                if (pip && xfs_inode_is_filestream(pip))
                        filestreams = 1;
                /* fall through */
        case S_IFDIR:
                if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
                        uint    di_flags = 0;

                        if (S_ISDIR(mode)) {
                                if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
                                        di_flags |= XFS_DIFLAG_RTINHERIT;
                                if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
                                        di_flags |= XFS_DIFLAG_EXTSZINHERIT;
                                        ip->i_d.di_extsize = pip->i_d.di_extsize;
                                }
                        } else if (S_ISREG(mode)) {
                                if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
                                        di_flags |= XFS_DIFLAG_REALTIME;
                                if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
                                        di_flags |= XFS_DIFLAG_EXTSIZE;
                                        ip->i_d.di_extsize = pip->i_d.di_extsize;
                                }
                        }
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
                            xfs_inherit_noatime)
                                di_flags |= XFS_DIFLAG_NOATIME;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
                            xfs_inherit_nodump)
                                di_flags |= XFS_DIFLAG_NODUMP;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
                            xfs_inherit_sync)
                                di_flags |= XFS_DIFLAG_SYNC;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
                            xfs_inherit_nosymlinks)
                                di_flags |= XFS_DIFLAG_NOSYMLINKS;
                        if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
                                di_flags |= XFS_DIFLAG_PROJINHERIT;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
                            xfs_inherit_nodefrag)
                                di_flags |= XFS_DIFLAG_NODEFRAG;
                        if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
                                di_flags |= XFS_DIFLAG_FILESTREAM;
                        ip->i_d.di_flags |= di_flags;
                }
                /* FALLTHROUGH */
        case S_IFLNK:
                ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
                ip->i_df.if_flags = XFS_IFEXTENTS;
                ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
                ip->i_df.if_u1.if_extents = NULL;
                break;
        default:
                ASSERT(0);
        }
        /*
         * Attribute fork settings for new inode.
         */
        ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
        ip->i_d.di_anextents = 0;

        /*
         * Log the new values stuffed into the inode.
         */
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, flags);

        /* now that we have an i_mode we can setup inode ops and unlock */
        xfs_setup_inode(ip);

        /* now we have set up the vfs inode we can associate the filestream */
        if (filestreams) {
                error = xfs_filestream_associate(pip, ip);
                if (error < 0)
                        return -error;
                if (!error)
                        xfs_iflags_set(ip, XFS_IFILESTREAM);
        }

        *ipp = ip;
        return 0;
}
/*
 * Allocates a new inode from disk and returns a pointer to the
 * incore copy. This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * xfs_create_dir.
 *
 */
int
xfs_dir_ialloc(
        xfs_trans_t     **tpp,          /* input: current transaction;
                                           output: may be a new transaction. */
        xfs_inode_t     *dp,            /* directory within which to allocate
                                           the inode. */
        umode_t         mode,
        xfs_nlink_t     nlink,
        xfs_dev_t       rdev,
        prid_t          prid,           /* project id */
        int             okalloc,        /* ok to allocate new space */
        xfs_inode_t     **ipp,          /* pointer to inode; it will be
                                           locked. */
        int             *committed)
{
        xfs_trans_t     *tp;
        xfs_trans_t     *ntp;
        xfs_inode_t     *ip;
        xfs_buf_t       *ialloc_context = NULL;
        int             code;
        void            *dqinfo;
        uint            tflags;

        tp = *tpp;
        ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

        /*
         * xfs_ialloc will return a pointer to an incore inode if
         * the Space Manager has an available inode on the free
         * list. Otherwise, it will do an allocation and replenish
         * the freelist.  Since we can only do one allocation per
         * transaction without deadlocks, we will need to commit the
         * current transaction and start a new one.  We will then
         * need to call xfs_ialloc again to get the inode.
         *
         * If xfs_ialloc did an allocation to replenish the freelist,
         * it returns the bp containing the head of the freelist as
         * ialloc_context. We will hold a lock on it across the
         * transaction commit so that no other process can steal
         * the inode(s) that we've just allocated.
         */
        code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
                          &ialloc_context, &ip);

        /*
         * Return an error if we were unable to allocate a new inode.
         * This should only happen if we run out of space on disk or
         * encounter a disk error.
         */
        if (code) {
                *ipp = NULL;
                return code;
        }
        if (!ialloc_context && !ip) {
                *ipp = NULL;
                return XFS_ERROR(ENOSPC);
        }

        /*
         * If the AGI buffer is non-NULL, then we were unable to get an
         * inode in one operation.  We need to commit the current
         * transaction and call xfs_ialloc() again.  It is guaranteed
         * to succeed the second time.
         */
        if (ialloc_context) {
                struct xfs_trans_res tres;

                /*
                 * Normally, xfs_trans_commit releases all the locks.
                 * We call bhold to hang on to the ialloc_context across
                 * the commit.  Holding this buffer prevents any other
                 * processes from doing any allocations in this
                 * allocation group.
                 */
                xfs_trans_bhold(tp, ialloc_context);
                /*
                 * Save the log reservation so we can use
                 * it in the next transaction.
                 */
                tres.tr_logres = xfs_trans_get_log_res(tp);
                tres.tr_logcount = xfs_trans_get_log_count(tp);

                /*
                 * We want the quota changes to be associated with the next
                 * transaction, NOT this one. So, detach the dqinfo from this
                 * and attach it to the next transaction.
                 */
                dqinfo = NULL;
                tflags = 0;
                if (tp->t_dqinfo) {
                        dqinfo = (void *)tp->t_dqinfo;
                        tp->t_dqinfo = NULL;
                        tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
                        tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
                }

                ntp = xfs_trans_dup(tp);
                code = xfs_trans_commit(tp, 0);
                tp = ntp;
                if (committed != NULL) {
                        *committed = 1;
                }
                /*
                 * If we get an error during the commit processing,
                 * release the buffer that is still held and return
                 * to the caller.
                 */
                if (code) {
                        xfs_buf_relse(ialloc_context);
                        if (dqinfo) {
                                tp->t_dqinfo = dqinfo;
                                xfs_trans_free_dqinfo(tp);
                        }
                        *tpp = ntp;
                        *ipp = NULL;
                        return code;
                }

                /*
                 * transaction commit worked ok so we can drop the extra ticket
                 * reference that we gained in xfs_trans_dup()
                 */
                xfs_log_ticket_put(tp->t_ticket);
                tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
                code = xfs_trans_reserve(tp, &tres, 0, 0);

                /*
                 * Re-attach the quota info that we detached from prev trx.
                 */
                if (dqinfo) {
                        tp->t_dqinfo = dqinfo;
                        tp->t_flags |= tflags;
                }

                if (code) {
                        xfs_buf_relse(ialloc_context);
                        *tpp = ntp;
                        *ipp = NULL;
                        return code;
                }
                xfs_trans_bjoin(tp, ialloc_context);

                /*
                 * Call ialloc again. Since we've locked out all
                 * other allocations in this allocation group,
                 * this call should always succeed.
                 */
                code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
                                  okalloc, &ialloc_context, &ip);

                /*
                 * If we get an error at this point, return to the caller
                 * so that the current transaction can be aborted.
                 */
                if (code) {
                        *tpp = tp;
                        *ipp = NULL;
                        return code;
                }
                ASSERT(!ialloc_context && ip);

        } else {
                if (committed != NULL)
                        *committed = 0;
        }

        *ipp = ip;
        *tpp = tp;

        return 0;
}
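
/*
 * Illustrative caller sketch (not in the original source): because this
 * routine may swap in a new transaction, callers must reload *tpp and
 * honour "committed" when deciding how to unwind, roughly as xfs_create()
 * does below:
 *
 *	error = xfs_dir_ialloc(&tp, dp, mode, 1, rdev, prid,
 *			       resblks > 0, &ip, &committed);
 *	if (error)
 *		... cancel or abort tp depending on what committed ...
 */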
/*
 * Decrement the link count on an inode & log the change.
 * If this causes the link count to go to zero, initiate the
 * logging activity required to truncate a file.
 */
int                             /* error */
xfs_droplink(
        xfs_trans_t *tp,
        xfs_inode_t *ip)
{
        int     error;

        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

        ASSERT(ip->i_d.di_nlink > 0);
        ip->i_d.di_nlink--;
        drop_nlink(VFS_I(ip));
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        error = 0;
        if (ip->i_d.di_nlink == 0) {
                /*
                 * We're dropping the last link to this file.
                 * Move the on-disk inode to the AGI unlinked list.
                 * From xfs_inactive() we will pull the inode from
                 * the list and free it.
                 */
                error = xfs_iunlink(tp, ip);
        }
        return error;
}
/*
 * This gets called when the inode's version needs to be changed from 1 to 2.
 * Currently this happens when the nlink field overflows the old 16-bit value
 * or when chproj is called to change the project for the first time.
 * As a side effect the superblock version will also get rev'd
 * to contain the NLINK bit.
 */
void
xfs_bump_ino_vers2(
        xfs_trans_t     *tp,
        xfs_inode_t     *ip)
{
        xfs_mount_t     *mp;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(ip->i_d.di_version == 1);

        ip->i_d.di_version = 2;
        ip->i_d.di_onlink = 0;
        memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
        mp = tp->t_mountp;
        if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
                spin_lock(&mp->m_sb_lock);
                if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
                        xfs_sb_version_addnlink(&mp->m_sb);
                        spin_unlock(&mp->m_sb_lock);
                        xfs_mod_sb(tp, XFS_SB_VERSIONNUM);
                } else {
                        spin_unlock(&mp->m_sb_lock);
                }
        }
        /* Caller must log the inode */
}
/*
 * Increment the link count on an inode & log the change.
 */
int
xfs_bumplink(
        xfs_trans_t *tp,
        xfs_inode_t *ip)
{
        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

        ASSERT(ip->i_d.di_nlink > 0);
        ip->i_d.di_nlink++;
        inc_nlink(VFS_I(ip));
        if ((ip->i_d.di_version == 1) &&
            (ip->i_d.di_nlink > XFS_MAXLINK_1)) {
                /*
                 * The inode has increased its number of links beyond
                 * what can fit in an old format inode.  It now needs
                 * to be converted to a version 2 inode with a 32 bit
                 * link count.  If this is the first inode in the file
                 * system to do this, then we need to bump the superblock
                 * version number as well.
                 */
                xfs_bump_ino_vers2(tp, ip);
        }

        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        return 0;
}
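
/*
 * Illustrative note (not in the original source): both link-count helpers
 * expect the inode joined to a transaction with the ILOCK held, e.g.:
 *
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	error = xfs_bumplink(tp, ip);	(or xfs_droplink(tp, ip))
 *
 * and they log XFS_ILOG_CORE themselves, so the caller needs no separate
 * xfs_trans_log_inode() for the nlink change.
 */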
int
xfs_create(
        xfs_inode_t             *dp,
        struct xfs_name         *name,
        umode_t                 mode,
        xfs_dev_t               rdev,
        xfs_inode_t             **ipp)
{
        int                     is_dir = S_ISDIR(mode);
        struct xfs_mount        *mp = dp->i_mount;
        struct xfs_inode        *ip = NULL;
        struct xfs_trans        *tp = NULL;
        int                     error;
        xfs_bmap_free_t         free_list;
        xfs_fsblock_t           first_block;
        bool                    unlock_dp_on_error = false;
        uint                    cancel_flags;
        int                     committed;
        prid_t                  prid;
        struct xfs_dquot        *udqp = NULL;
        struct xfs_dquot        *gdqp = NULL;
        struct xfs_dquot        *pdqp = NULL;
        struct xfs_trans_res    tres;
        uint                    resblks;

        trace_xfs_create(dp, name);

        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);

        if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
                prid = xfs_get_projid(dp);
        else
                prid = XFS_PROJID_DEFAULT;

        /*
         * Make sure that we have allocated dquot(s) on disk.
         */
        error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
                                        xfs_kgid_to_gid(current_fsgid()), prid,
                                        XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
                                        &udqp, &gdqp, &pdqp);
        if (error)
                return error;

        if (is_dir) {
                rdev = 0;
                resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
                tres.tr_logres = M_RES(mp)->tr_mkdir.tr_logres;
                tres.tr_logcount = XFS_MKDIR_LOG_COUNT;
                tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
        } else {
                resblks = XFS_CREATE_SPACE_RES(mp, name->len);
                tres.tr_logres = M_RES(mp)->tr_create.tr_logres;
                tres.tr_logcount = XFS_CREATE_LOG_COUNT;
                tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
        }

        cancel_flags = XFS_TRANS_RELEASE_LOG_RES;

        /*
         * Initially assume that the file does not exist and
         * reserve the resources for that case.  If that is not
         * the case we'll drop the one we have and get a more
         * appropriate transaction later.
         */
        tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
        error = xfs_trans_reserve(tp, &tres, resblks, 0);
        if (error == ENOSPC) {
                /* flush outstanding delalloc blocks and retry */
                xfs_flush_inodes(mp);
                error = xfs_trans_reserve(tp, &tres, resblks, 0);
        }
        if (error == ENOSPC) {
                /* No space at all so try a "no-allocation" reservation */
                resblks = 0;
                error = xfs_trans_reserve(tp, &tres, 0, 0);
        }
        if (error) {
                cancel_flags = 0;
                goto out_trans_cancel;
        }
        xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
        unlock_dp_on_error = true;

        xfs_bmap_init(&free_list, &first_block);

        /*
         * Reserve disk quota and the inode.
         */
        error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
                                                pdqp, resblks, 1, 0);
        if (error)
                goto out_trans_cancel;

        error = xfs_dir_canenter(tp, dp, name, resblks);
        if (error)
                goto out_trans_cancel;

        /*
         * A newly created regular or special file just has one directory
         * entry pointing to it, but a directory also has the "." entry
         * pointing to itself.
         */
        error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
                               prid, resblks > 0, &ip, &committed);
        if (error) {
                if (error == ENOSPC)
                        goto out_trans_cancel;
                goto out_trans_abort;
        }

        /*
         * Now we join the directory inode to the transaction.  We do not do it
         * earlier because xfs_dir_ialloc might commit the previous transaction
         * (and release all the locks).  An error from here on will result in
         * the transaction cancel unlocking dp so don't do it explicitly in the
         * error path.
         */
        xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
        unlock_dp_on_error = false;

        error = xfs_dir_createname(tp, dp, name, ip->i_ino,
                                        &first_block, &free_list, resblks ?
                                        resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
        if (error) {
                ASSERT(error != ENOSPC);
                goto out_trans_abort;
        }
        xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

        if (is_dir) {
                error = xfs_dir_init(tp, ip, dp);
                if (error)
                        goto out_bmap_cancel;

                error = xfs_bumplink(tp, dp);
                if (error)
                        goto out_bmap_cancel;
        }

        /*
         * If this is a synchronous mount, make sure that the
         * create transaction goes to disk before returning to
         * the user.
         */
        if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
                xfs_trans_set_sync(tp);

        /*
         * Attach the dquot(s) to the inodes and modify them incore.
         * The ids of the inode couldn't have changed since the new
         * inode has been locked ever since it was created.
         */
        xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

        error = xfs_bmap_finish(&tp, &free_list, &committed);
        if (error)
                goto out_bmap_cancel;

        error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
        if (error)
                goto out_release_inode;

        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);
        xfs_qm_dqrele(pdqp);

        *ipp = ip;
        return 0;

out_bmap_cancel:
        xfs_bmap_cancel(&free_list);
out_trans_abort:
        cancel_flags |= XFS_TRANS_ABORT;
out_trans_cancel:
        xfs_trans_cancel(tp, cancel_flags);
out_release_inode:
        /*
         * Wait until after the current transaction is aborted to
         * release the inode.  This prevents recursive transactions
         * and deadlocks from xfs_inactive.
         */
        if (ip)
                IRELE(ip);

        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);
        xfs_qm_dqrele(pdqp);

        if (unlock_dp_on_error)
                xfs_iunlock(dp, XFS_ILOCK_EXCL);
        return error;
}
int
xfs_link(
        xfs_inode_t             *tdp,
        xfs_inode_t             *sip,
        struct xfs_name         *target_name)
{
        xfs_mount_t             *mp = tdp->i_mount;
        xfs_trans_t             *tp;
        int                     error;
        xfs_bmap_free_t         free_list;
        xfs_fsblock_t           first_block;
        int                     cancel_flags;
        int                     committed;
        int                     resblks;

        trace_xfs_link(tdp, target_name);

        ASSERT(!S_ISDIR(sip->i_d.di_mode));

        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);

        error = xfs_qm_dqattach(sip, 0);
        if (error)
                goto std_return;

        error = xfs_qm_dqattach(tdp, 0);
        if (error)
                goto std_return;

        tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
        cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
        resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
        error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, resblks, 0);
        if (error == ENOSPC) {
                resblks = 0;
                error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, 0, 0);
        }
        if (error) {
                cancel_flags = 0;
                goto error_return;
        }

        xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);

        xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);

        /*
         * If we are using project inheritance, we only allow hard link
         * creation in our tree when the project IDs are the same; else
         * the tree quota mechanism could be circumvented.
         */
        if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
                     (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
                error = XFS_ERROR(EXDEV);
                goto error_return;
        }

        error = xfs_dir_canenter(tp, tdp, target_name, resblks);
        if (error)
                goto error_return;

        xfs_bmap_init(&free_list, &first_block);

        error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
                                        &first_block, &free_list, resblks);
        if (error)
                goto abort_return;
        xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

        error = xfs_bumplink(tp, sip);
        if (error)
                goto abort_return;

        /*
         * If this is a synchronous mount, make sure that the
         * link transaction goes to disk before returning to
         * the user.
         */
        if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
                xfs_trans_set_sync(tp);
        }

        error = xfs_bmap_finish(&tp, &free_list, &committed);
        if (error) {
                xfs_bmap_cancel(&free_list);
                goto abort_return;
        }

        return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);

abort_return:
        cancel_flags |= XFS_TRANS_ABORT;
error_return:
        xfs_trans_cancel(tp, cancel_flags);
std_return:
        return error;
}

/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	struct xfs_trans	*ntp;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		first_unmap_block;
	xfs_fileoff_t		last_block;
	xfs_filblks_t		unmap_len;
	int			committed;
	int			error = 0;
	int			done = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (first_unmap_block == last_block)
		return 0;

	ASSERT(first_unmap_block < last_block);
	unmap_len = last_block - first_unmap_block + 1;
	while (!done) {
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(tp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(whichfork),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    &done);
		if (error)
			goto out_bmap_cancel;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (committed)
			xfs_trans_ijoin(tp, ip, 0);
		if (error)
			goto out_bmap_cancel;

		if (committed) {
			/*
			 * Mark the inode dirty so it will be logged and
			 * moved forward in the log as part of every commit.
			 */
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		ntp = xfs_trans_dup(tp);
		error = xfs_trans_commit(tp, 0);
		tp = ntp;

		xfs_trans_ijoin(tp, ip, 0);

		if (error)
			goto out;

		/*
		 * Transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup()
		 */
		xfs_log_ticket_put(tp->t_ticket);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
		if (error)
			goto out;
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
out_bmap_cancel:
	/*
	 * If the bunmapi call encounters an error, return to the caller where
	 * the transaction can be properly aborted.  We just need to make sure
	 * we're not holding any resources that we were not when we came in.
	 */
	xfs_bmap_cancel(&free_list);
	goto out;
}
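
/*
 * (Editorial note) xfs_release() is reached from the VFS release path
 * when an open file referencing this inode is closed.  For regular
 * files on a writable mount it may kick off early writeback after a
 * truncate and trim speculative preallocation past EOF, as implemented
 * below.
 */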
int
xfs_release(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error;

	if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
		return 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	if (!XFS_FORCED_SHUTDOWN(mp)) {
		int truncated;

		/*
		 * If we are using filestreams, and we have an unlinked
		 * file that we are processing the last close on, then nothing
		 * will be able to reopen and write to this file.  Purge this
		 * inode from the filestreams cache so that it doesn't delay
		 * teardown of the inode.
		 */
		if ((ip->i_d.di_nlink == 0) && xfs_inode_is_filestream(ip))
			xfs_filestream_deassociate(ip);

		/*
		 * If we previously truncated this file and removed old data
		 * in the process, we want to initiate "early" writeout on
		 * the last close.  This is an attempt to combat the notorious
		 * NULL files problem which is particularly noticeable from a
		 * truncate down, buffered (re-)write (delalloc), followed by
		 * a crash.  What we are effectively doing here is
		 * significantly reducing the time window where we'd otherwise
		 * be exposed to that problem.
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated) {
			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
			if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) {
				error = -filemap_flush(VFS_I(ip)->i_mapping);
				if (error)
					return error;
			}
		}
	}

	if (ip->i_d.di_nlink == 0)
		return 0;

	if (xfs_can_free_eofblocks(ip, false)) {

		/*
		 * If we can't get the iolock just skip truncating the blocks
		 * past EOF because we could deadlock with the mmap_sem
		 * otherwise.  We'll get another chance to drop them once the
		 * last reference to the inode is dropped, so we'll never leak
		 * blocks permanently.
		 *
		 * Further, check if the inode is being opened, written and
		 * closed frequently and we have delayed allocation blocks
		 * outstanding (e.g. streaming writes from the NFS server),
		 * truncating the blocks past EOF will cause fragmentation to
		 * occur.
		 *
		 * In this case don't do the truncation, either, but we have to
		 * be careful how we detect this case. Blocks beyond EOF show
		 * up as i_delayed_blks even when the inode is clean, so we
		 * need to truncate them away first before checking for a dirty
		 * release. Hence on the first dirty close we will still remove
		 * the speculative allocation, but after that we will leave it
		 * in place.
		 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			return 0;

		error = xfs_free_eofblocks(mp, ip, true);
		if (error && error != EAGAIN)
			return error;

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}
	return 0;
}

/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes. See the related
	 * comment in xfs_setattr_size() for details.
	 */
	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_d.di_nextents == 0);

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * xfs_inactive_ifree()
 *
 * Perform the inode free when an inode is unlinked.
 */
STATIC int
xfs_inactive_ifree(
	struct xfs_inode *ip)
{
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	int			committed;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, 0, 0);
	if (error) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	xfs_bmap_init(&free_list, &first_block);
	error = xfs_ifree(tp, ip, &free_list);
	if (error) {
		/*
		 * If we fail to free the inode, shut down.  The cancel
		 * might do that, we need to make sure.  Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		return error;
	}

	/*
	 * Credit the quota account(s). The inode is gone.
	 */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

	/*
	 * Just ignore errors at this point.  There is nothing we can
	 * do except to try to keep going. Make sure it's not a silent
	 * error.
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
			__func__, error);
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
			__func__, error);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * xfs_inactive
 *
 * This is called when the vnode reference count for the vnode
 * goes to zero.  If the file has been unlinked, then it must
 * now be truncated.  Also, we clear all of the read-ahead state
 * kept for the inode here since the file is now closed.
 */
void
xfs_inactive(
	xfs_inode_t	*ip)
{
	struct xfs_mount	*mp;
	int			error;
	int			truncate = 0;

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (ip->i_d.di_mode == 0) {
		ASSERT(ip->i_df.if_real_bytes == 0);
		ASSERT(ip->i_df.if_broot_bytes == 0);
		return;
	}

	mp = ip->i_mount;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return;

	if (ip->i_d.di_nlink != 0) {
		/*
		 * force is true because we are evicting an inode from the
		 * cache. Post-eof blocks must be freed, lest we end up with
		 * broken free space accounting.
		 */
		if (xfs_can_free_eofblocks(ip, true))
			xfs_free_eofblocks(mp, ip, false);
		return;
	}

	if (S_ISREG(ip->i_d.di_mode) &&
	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
	     ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
		truncate = 1;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return;

	if (S_ISLNK(ip->i_d.di_mode))
		error = xfs_inactive_symlink(ip);
	else if (truncate)
		error = xfs_inactive_truncate(ip);
	if (error)
		return;

	/*
	 * If there are attributes associated with the file then blow them away
	 * now.  The code calls a routine that recursively deconstructs the
	 * attribute fork. We need to just commit the current transaction
	 * because we can't use it for xfs_attr_inactive().
	 */
	if (ip->i_d.di_anextents > 0) {
		ASSERT(ip->i_d.di_forkoff != 0);

		error = xfs_attr_inactive(ip);
		if (error)
			return;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	ASSERT(ip->i_d.di_anextents == 0);

	/*
	 * Free the inode.
	 */
	error = xfs_inactive_ifree(ip);
	if (error)
		return;

	/*
	 * Release the dquots held by inode, if any.
	 */
	xfs_qm_dqdetach(ip);
}

/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
int
xfs_iunlink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agino_t	agino;
	short		bucket_index;
	int		offset;
	int		error;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	mp = tp->t_mountp;

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
	if (error)
		return error;
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to.  Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error)
			return error;

		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_imap.im_boffset +
			offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, dip);

		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);
	}

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	ASSERT(agino != 0);
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
	return 0;
}
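
/*
 * (Editorial note) The AGI carries XFS_AGI_UNLINKED_BUCKETS singly
 * linked lists of unlinked-but-still-open inodes, headed by
 * agi_unlinked[] and chained through each on-disk inode's
 * di_next_unlinked field.  xfs_iunlink() above pushes onto the head of
 * bucket (agino % XFS_AGI_UNLINKED_BUCKETS); xfs_iunlink_remove() below
 * has to cope with unlinking from an arbitrary position in the chain.
 */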

/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
STATIC int
xfs_iunlink_remove(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_ino_t	next_ino;
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agnumber_t	agno;
	xfs_agino_t	agino;
	xfs_agino_t	next_agino;
	xfs_buf_t	*last_ibp;
	xfs_dinode_t	*last_dip = NULL;
	short		bucket_index;
	int		offset, last_offset = 0;
	int		error;

	mp = tp->t_mountp;
	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		return error;
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
	ASSERT(agi->agi_unlinked[bucket_index]);

	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
		/*
		 * We're at the head of the list.  Get the inode's on-disk
		 * buffer to see if there is anyone after us on the list.
		 * Only modify our next pointer if it is not already NULLAGINO.
		 * This saves us the overhead of dealing with the buffer when
		 * there is no need to change it.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the bucket head pointer at the next inode.
		 */
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
		offset = offsetof(xfs_agi_t, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
		xfs_trans_log_buf(tp, agibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
	} else {
		/*
		 * We need to search the list for the inode being freed.
		 */
		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
		last_ibp = NULL;
		while (next_agino != agino) {
			struct xfs_imap	imap;

			if (last_ibp)
				xfs_trans_brelse(tp, last_ibp);

			imap.im_blkno = 0;
			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);

			error = xfs_imap(mp, tp, next_ino, &imap, 0);
			if (error) {
				xfs_warn(mp,
					"%s: xfs_imap returned error %d.",
					__func__, error);
				return error;
			}

			error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
					       &last_ibp, 0, 0);
			if (error) {
				xfs_warn(mp,
					"%s: xfs_imap_to_bp returned error %d.",
					__func__, error);
				return error;
			}

			last_offset = imap.im_boffset;
			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
			ASSERT(next_agino != NULLAGINO);
			ASSERT(next_agino != 0);
		}

		/*
		 * Now last_ibp points to the buffer previous to us on the
		 * unlinked list.  Pull us from the list.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the previous inode on the list to the next inode.
		 */
		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
		ASSERT(next_agino != 0);
		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, last_dip);

		xfs_trans_inode_buf(tp, last_ibp);
		xfs_trans_log_buf(tp, last_ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, last_ibp);
	}
	return 0;
}

/*
 * A big issue when freeing the inode cluster is that we _cannot_ skip any
 * inodes that are in memory - they all must be marked stale and attached to
 * the cluster buffer.
 */
STATIC int
xfs_ifree_cluster(
	xfs_inode_t	*free_ip,
	xfs_trans_t	*tp,
	xfs_ino_t	inum)
{
	xfs_mount_t		*mp = free_ip->i_mount;
	int			blks_per_cluster;
	int			nbufs;
	int			ninodes;
	int			i, j;
	xfs_daddr_t		blkno;
	xfs_buf_t		*bp;
	xfs_inode_t		*ip;
	xfs_inode_log_item_t	*iip;
	xfs_log_item_t		*lip;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
		blks_per_cluster = 1;
		ninodes = mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp);
	} else {
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
					mp->m_sb.sb_blocksize;
		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
	}

	for (j = 0; j < nbufs; j++, inum += ninodes) {
		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
					 XFS_INO_TO_AGBNO(mp, inum));

		/*
		 * We obtain and lock the backing buffer first in the process
		 * here, as we have to ensure that any dirty inode that we
		 * can't get the flush lock on is attached to the buffer.
		 * If we scan the in-memory inodes first, then buffer IO can
		 * complete before we get a lock on it, and hence we may fail
		 * to mark all the active inodes on the buffer stale.
		 */
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
					mp->m_bsize * blks_per_cluster,
					XBF_UNMAPPED);

		if (!bp)
			return ENOMEM;

		/*
		 * This buffer may not have been correctly initialised as we
		 * didn't read it from disk.  That's not important because we
		 * are only using it to mark the buffer as stale in the log,
		 * and to attach stale cached inodes to it.  That means it
		 * will never be dispatched for IO.  If it is, we want to know
		 * about it, and we want it to fail.  We can achieve this by
		 * adding a write verifier to the buffer.
		 */
		bp->b_ops = &xfs_inode_buf_ops;

		/*
		 * Walk the inodes already attached to the buffer and mark them
		 * stale. These will all have the flush locks held, so an
		 * in-memory inode walk can't lock them. By marking them all
		 * stale first, we will not attempt to lock them in the loop
		 * below as the XFS_ISTALE flag will be set.
		 */
		lip = bp->b_fspriv;
		while (lip) {
			if (lip->li_type == XFS_LI_INODE) {
				iip = (xfs_inode_log_item_t *)lip;
				ASSERT(iip->ili_logged == 1);
				lip->li_cb = xfs_istale_done;
				xfs_trans_ail_copy_lsn(mp->m_ail,
							&iip->ili_flush_lsn,
							&iip->ili_item.li_lsn);
				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
			}
			lip = lip->li_bio_list;
		}

		/*
		 * For each inode in memory attempt to add it to the inode
		 * buffer and set it up for being staled on buffer IO
		 * completion.  This is safe as we've locked out tail pushing
		 * and flushing by locking the buffer.
		 *
		 * We have already marked every inode that was part of a
		 * transaction stale above, which means there is no point in
		 * even trying to lock them.
		 */
		for (i = 0; i < ninodes; i++) {
retry:
			rcu_read_lock();
			ip = radix_tree_lookup(&pag->pag_ici_root,
					XFS_INO_TO_AGINO(mp, (inum + i)));

			/* Inode not in memory, nothing to do */
			if (!ip) {
				rcu_read_unlock();
				continue;
			}

			/*
			 * because this is an RCU protected lookup, we could
			 * find a recently freed or even reallocated inode
			 * during the lookup. We need to check under the
			 * i_flags_lock for a valid inode here. Skip it if it
			 * is not valid, the wrong inode or stale.
			 */
			spin_lock(&ip->i_flags_lock);
			if (ip->i_ino != inum + i ||
			    __xfs_iflags_test(ip, XFS_ISTALE)) {
				spin_unlock(&ip->i_flags_lock);
				rcu_read_unlock();
				continue;
			}
			spin_unlock(&ip->i_flags_lock);

			/*
			 * Don't try to lock/unlock the current inode, but we
			 * _cannot_ skip the other inodes that we did not find
			 * in the list attached to the buffer and are not
			 * already marked stale. If we can't lock it, back off
			 * and retry.
			 */
			if (ip != free_ip &&
			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
				rcu_read_unlock();
				delay(1);
				goto retry;
			}
			rcu_read_unlock();

			xfs_iflock(ip);
			xfs_iflags_set(ip, XFS_ISTALE);

			/*
			 * we don't need to attach clean inodes or those only
			 * with unlogged changes (which we throw away, anyway).
			 */
			iip = ip->i_itemp;
			if (!iip || xfs_inode_clean(ip)) {
				ASSERT(ip != free_ip);
				xfs_ifunlock(ip);
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				continue;
			}

			iip->ili_last_fields = iip->ili_fields;
			iip->ili_fields = 0;
			iip->ili_logged = 1;
			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
						&iip->ili_item.li_lsn);

			xfs_buf_attach_iodone(bp, xfs_istale_done,
						  &iip->ili_item);

			if (ip != free_ip)
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		xfs_trans_stale_inode_buf(tp, bp);
		xfs_trans_binval(tp, bp);
	}

	xfs_perag_put(pag);
	return 0;
}

/*
 * This is called to return an inode to the inode free list.
 * The inode should already be truncated to 0 length and have
 * no pages associated with it.  This routine also assumes that
 * the inode is already a part of the transaction.
 *
 * The on-disk copy of the inode will have been added to the list
 * of unlinked inodes in the AGI. We need to remove the inode from
 * that list atomically with respect to freeing it here.
 */
int
xfs_ifree(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_bmap_free_t	*flist)
{
	int		error;
	int		delete;
	xfs_ino_t	first_ino;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_nextents == 0);
	ASSERT(ip->i_d.di_anextents == 0);
	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
	ASSERT(ip->i_d.di_nblocks == 0);

	/*
	 * Pull the on-disk inode from the AGI unlinked list.
	 */
	error = xfs_iunlink_remove(tp, ip);
	if (error)
		return error;

	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
	if (error)
		return error;

	ip->i_d.di_mode = 0;		/* mark incore inode as free */
	ip->i_d.di_flags = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;

	/*
	 * Bump the generation count so no one will be confused
	 * by reincarnations of this inode.
	 */
	ip->i_d.di_gen++;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (delete)
		error = xfs_ifree_cluster(ip, tp, first_ino);

	return error;
}

/*
 * This is called to unpin an inode.  The caller must have the inode locked
 * in at least shared mode so that the buffer cannot be subsequently pinned
 * once someone is waiting for it to be unpinned.
 */
static void
xfs_iunpin(
	struct xfs_inode	*ip)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));

	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);

	/* Give the log a push to start the unpinning I/O */
	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
}
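
/*
 * (Editorial note) __xfs_iunpin_wait() below uses the generic bit
 * waitqueue pattern: it kicks the log via xfs_iunpin() and then sleeps
 * uninterruptibly on __XFS_IPINNED_BIT until log IO completion drops
 * the pin count to zero and wakes the bit waitqueue.
 */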
static void
__xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);

	xfs_iunpin(ip);

	do {
		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_ipincount(ip))
			io_schedule();
	} while (xfs_ipincount(ip));
	finish_wait(wq, &wait.wait);
}

void
xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	if (xfs_ipincount(ip))
		__xfs_iunpin_wait(ip);
}
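
/*
 * (Editorial note) Remove the directory entry 'name' referring to 'ip'
 * from directory 'dp'.  This backs both unlink(2) and rmdir(2); the
 * is_dir branches below handle the extra "." and ".." link accounting
 * that directories require.
 */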
int
xfs_remove(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		*ip)
{
	xfs_mount_t		*mp = dp->i_mount;
	xfs_trans_t		*tp = NULL;
	int			is_dir = S_ISDIR(ip->i_d.di_mode);
	int			error = 0;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	int			cancel_flags;
	int			committed;
	int			link_zero;
	uint			resblks;
	uint			log_count;

	trace_xfs_remove(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	error = xfs_qm_dqattach(dp, 0);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		goto std_return;

	if (is_dir) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
		log_count = XFS_DEFAULT_LOG_COUNT;
	} else {
		tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
		log_count = XFS_REMOVE_LOG_COUNT;
	}
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;

	/*
	 * We try to get the real space reservation first,
	 * allowing for directory btree deletion(s) implying
	 * possible bmap insert(s).  If we can't get the space
	 * reservation then we use 0 instead, and avoid the bmap
	 * btree insert(s) in the directory code by, if the bmap
	 * insert tries to happen, instead trimming the LAST
	 * block from the directory.
	 */
	resblks = XFS_REMOVE_SPACE_RES(mp);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, resblks, 0);
	if (error == ENOSPC) {
		resblks = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, 0, 0);
	}
	if (error) {
		ASSERT(error != ENOSPC);
		cancel_flags = 0;
		goto out_trans_cancel;
	}

	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	/*
	 * If we're removing a directory perform some additional validation.
	 */
	if (is_dir) {
		ASSERT(ip->i_d.di_nlink >= 2);
		if (ip->i_d.di_nlink != 2) {
			error = XFS_ERROR(ENOTEMPTY);
			goto out_trans_cancel;
		}
		if (!xfs_dir_isempty(ip)) {
			error = XFS_ERROR(ENOTEMPTY);
			goto out_trans_cancel;
		}
	}

	xfs_bmap_init(&free_list, &first_block);
	error = xfs_dir_removename(tp, dp, name, ip->i_ino,
					&first_block, &free_list, resblks);
	if (error) {
		ASSERT(error != ENOENT);
		goto out_bmap_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

	if (is_dir) {
		/*
		 * Drop the link from ip's "..".
		 */
		error = xfs_droplink(tp, dp);
		if (error)
			goto out_bmap_cancel;

		/*
		 * Drop the "." link from ip to self.
		 */
		error = xfs_droplink(tp, ip);
		if (error)
			goto out_bmap_cancel;
	} else {
		/*
		 * When removing a non-directory we need to log the parent
		 * inode here.  For a directory this is done implicitly
		 * by the xfs_droplink call for the ".." entry.
		 */
		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
	}

	/*
	 * Drop the link from dp to ip.
	 */
	error = xfs_droplink(tp, ip);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Determine if this is the last link while
	 * we are in the transaction.
	 */
	link_zero = (ip->i_d.di_nlink == 0);

	/*
	 * If this is a synchronous mount, make sure that the
	 * remove transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto std_return;

	/*
	 * If we are using filestreams, kill the stream association.
	 * If the file is still open it may get a new one but that
	 * will get killed on last close in xfs_close() so we don't
	 * have to worry about that.
	 */
	if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
		xfs_filestream_deassociate(ip);

	return 0;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	cancel_flags |= XFS_TRANS_ABORT;
out_trans_cancel:
	xfs_trans_cancel(tp, cancel_flags);
std_return:
	return error;
}

/*
 * Enter all inodes for a rename transaction into a sorted array.
 */
STATIC void
xfs_sort_for_rename(
	xfs_inode_t	*dp1,	/* in: old (source) directory inode */
	xfs_inode_t	*dp2,	/* in: new (target) directory inode */
	xfs_inode_t	*ip1,	/* in: inode of old entry */
	xfs_inode_t	*ip2,	/* in: inode of new entry, if it already
				   exists, NULL otherwise. */
	xfs_inode_t	**i_tab,/* out: array of inode returned, sorted */
	int		*num_inodes)  /* out: number of inodes in array */
{
	xfs_inode_t		*temp;
	int			i, j;

	/*
	 * i_tab contains a list of pointers to inodes.  We initialize
	 * the table here & we'll sort it.  We will then use it to
	 * order the acquisition of the inode locks.
	 *
	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
	 */
	i_tab[0] = dp1;
	i_tab[1] = dp2;
	i_tab[2] = ip1;
	if (ip2) {
		*num_inodes = 4;
		i_tab[3] = ip2;
	} else {
		*num_inodes = 3;
		i_tab[3] = NULL;
	}

	/*
	 * Sort the elements via bubble sort.  (Remember, there are at
	 * most 4 elements to sort, so this is adequate.)
	 */
	for (i = 0; i < *num_inodes; i++) {
		for (j = 1; j < *num_inodes; j++) {
			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
				temp = i_tab[j];
				i_tab[j] = i_tab[j-1];
				i_tab[j-1] = temp;
			}
		}
	}
}
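
/*
 * (Editorial note) Sorting the inodes by inode number gives every
 * rename a single global lock acquisition order, which is what prevents
 * an AB-BA deadlock between two concurrent renames operating on the
 * same directories in opposite directions.
 */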

/*
 * xfs_rename
 */
int
xfs_rename(
	xfs_inode_t	*src_dp,
	struct xfs_name	*src_name,
	xfs_inode_t	*src_ip,
	xfs_inode_t	*target_dp,
	struct xfs_name	*target_name,
	xfs_inode_t	*target_ip)
{
	xfs_trans_t	*tp = NULL;
	xfs_mount_t	*mp = src_dp->i_mount;
	int		new_parent;		/* moving to a new dir */
	int		src_is_directory;	/* src_name is a directory */
	int		error;
	xfs_bmap_free_t	free_list;
	xfs_fsblock_t	first_block;
	int		cancel_flags;
	int		committed;
	xfs_inode_t	*inodes[4];
	int		spaceres;
	int		num_inodes;

	trace_xfs_rename(src_dp, target_dp, src_name, target_name);

	new_parent = (src_dp != target_dp);
	src_is_directory = S_ISDIR(src_ip->i_d.di_mode);

	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip,
				inodes, &num_inodes);

	xfs_bmap_init(&free_list, &first_block);
	tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0);
	if (error == ENOSPC) {
		spaceres = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0);
	}
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto std_return;
	}

	/*
	 * Attach the dquots to the inodes
	 */
	error = xfs_qm_vop_rename_dqattach(inodes);
	if (error) {
		xfs_trans_cancel(tp, cancel_flags);
		goto std_return;
	}

	/*
	 * Lock all the participating inodes. Depending upon whether
	 * the target_name exists in the target directory, and
	 * whether the target directory is the same as the source
	 * directory, we can lock from 2 to 4 inodes.
	 */
	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);

	/*
	 * Join all the inodes to the transaction. From this point on,
	 * we can rely on either trans_commit or trans_cancel to unlock
	 * them.
	 */
	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
	if (new_parent)
		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
	if (target_ip)
		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);

	/*
	 * If we are using project inheritance, we only allow renames
	 * into our tree when the project IDs are the same; else the
	 * tree quota mechanism would be circumvented.
	 */
	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
		error = XFS_ERROR(EXDEV);
		goto error_return;
	}

	/*
	 * Set up the target.
	 */
	if (target_ip == NULL) {
		/*
		 * If there's no space reservation, check the entry will
		 * fit before actually inserting it.
		 */
		error = xfs_dir_canenter(tp, target_dp, target_name, spaceres);
		if (error)
			goto error_return;
		/*
		 * If target does not exist and the rename crosses
		 * directories, adjust the target directory link count
		 * to account for the ".." reference from the new entry.
		 */
		error = xfs_dir_createname(tp, target_dp, target_name,
						src_ip->i_ino, &first_block,
						&free_list, spaceres);
		if (error == ENOSPC)
			goto error_return;
		if (error)
			goto abort_return;

		xfs_trans_ichgtime(tp, target_dp,
					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		if (new_parent && src_is_directory) {
			error = xfs_bumplink(tp, target_dp);
			if (error)
				goto abort_return;
		}
	} else { /* target_ip != NULL */
		/*
		 * If target exists and it's a directory, check that both
		 * target and source are directories and that target can be
		 * destroyed, or that neither is a directory.
		 */
		if (S_ISDIR(target_ip->i_d.di_mode)) {
			/*
			 * Make sure target dir is empty.
			 */
			if (!(xfs_dir_isempty(target_ip)) ||
			    (target_ip->i_d.di_nlink > 2)) {
				error = XFS_ERROR(EEXIST);
				goto error_return;
			}
		}

		/*
		 * Link the source inode under the target name.
		 * If the source inode is a directory and we are moving
		 * it across directories, its ".." entry will be
		 * inconsistent until we replace that down below.
		 *
		 * In case there is already an entry with the same
		 * name at the destination directory, remove it first.
		 */
		error = xfs_dir_replace(tp, target_dp, target_name,
					src_ip->i_ino,
					&first_block, &free_list, spaceres);
		if (error)
			goto abort_return;

		xfs_trans_ichgtime(tp, target_dp,
					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		/*
		 * Decrement the link count on the target since the target
		 * dir no longer points to it.
		 */
		error = xfs_droplink(tp, target_ip);
		if (error)
			goto abort_return;

		if (src_is_directory) {
			/*
			 * Drop the link from the old "." entry.
			 */
			error = xfs_droplink(tp, target_ip);
			if (error)
				goto abort_return;
		}
	} /* target_ip != NULL */

	/*
	 * Remove the source.
	 */
	if (new_parent && src_is_directory) {
		/*
		 * Rewrite the ".." entry to point to the new
		 * directory.
		 */
		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
					target_dp->i_ino,
					&first_block, &free_list, spaceres);
		ASSERT(error != EEXIST);
		if (error)
			goto abort_return;
	}

	/*
	 * We always want to hit the ctime on the source inode.
	 *
	 * This isn't strictly required by the standards since the source
	 * inode isn't really being changed, but old unix file systems did
	 * it and some incremental backup programs won't work without it.
	 */
	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);

	/*
	 * Adjust the link count on src_dp.  This is necessary when
	 * renaming a directory, either within one parent when
	 * the target existed, or across two parent directories.
	 */
	if (src_is_directory && (new_parent || target_ip != NULL)) {
		/*
		 * Decrement link count on src_directory since the
		 * entry that's moved no longer points to it.
		 */
		error = xfs_droplink(tp, src_dp);
		if (error)
			goto abort_return;
	}

	error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
					&first_block, &free_list, spaceres);
	if (error)
		goto abort_return;

	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
	if (new_parent)
		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);

	/*
	 * If this is a synchronous mount, make sure that the
	 * rename transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
		xfs_trans_set_sync(tp);
	}

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		xfs_bmap_cancel(&free_list);
		xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
				      XFS_TRANS_ABORT));
		goto std_return;
	}

	/*
	 * trans_commit will unlock src_ip, target_ip & decrement
	 * the vnode references.
	 */
	return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);

abort_return:
	cancel_flags |= XFS_TRANS_ABORT;
error_return:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, cancel_flags);
std_return:
	return error;
}
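
/*
 * (Editorial note) Attempt to gather every other dirty, unpinned inode
 * that lives in the same inode cluster as 'ip' and flush them into the
 * cluster buffer 'bp' as well, so a single buffer write cleans the
 * whole cluster rather than one inode at a time.
 */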
STATIC int
xfs_iflush_cluster(
	xfs_inode_t	*ip,
	xfs_buf_t	*bp)
{
	xfs_mount_t		*mp = ip->i_mount;
	struct xfs_perag	*pag;
	unsigned long		first_index, mask;
	unsigned long		inodes_per_cluster;
	int			ilist_size;
	xfs_inode_t		**ilist;
	xfs_inode_t		*iq;
	int			nr_found;
	int			clcount = 0;
	int			bufwasdelwri;
	int			i;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));

	inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
	if (!ilist)
		goto out_put;

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
	rcu_read_lock();
	/* really need a gang lookup range call here */
	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
					first_index, inodes_per_cluster);
	if (nr_found == 0)
		goto out_free;

	for (i = 0; i < nr_found; i++) {
		iq = ilist[i];
		if (iq == ip)
			continue;

		/*
		 * because this is an RCU protected lookup, we could find a
		 * recently freed or even reallocated inode during the lookup.
		 * We need to check under the i_flags_lock for a valid inode
		 * here. Skip it if it is not valid or the wrong inode.
		 */
		spin_lock(&iq->i_flags_lock);
		if (!iq->i_ino ||
		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
			spin_unlock(&iq->i_flags_lock);
			continue;
		}
		spin_unlock(&iq->i_flags_lock);

		/*
		 * Do an un-protected check to see if the inode is dirty and
		 * is a candidate for flushing.  These checks will be repeated
		 * later after the appropriate locks are acquired.
		 */
		if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
			continue;

		/*
		 * Try to get locks.  If any are unavailable or it is pinned,
		 * then this inode cannot be flushed and is skipped.
		 */
		if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
			continue;
		if (!xfs_iflock_nowait(iq)) {
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}
		if (xfs_ipincount(iq)) {
			xfs_ifunlock(iq);
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * arriving here means that this inode can be flushed.  First
		 * re-check that it's dirty before flushing.
		 */
		if (!xfs_inode_clean(iq)) {
			int	error;

			error = xfs_iflush_int(iq, bp);
			if (error) {
				xfs_iunlock(iq, XFS_ILOCK_SHARED);
				goto cluster_corrupt_out;
			}
			clcount++;
		} else {
			xfs_ifunlock(iq);
		}
		xfs_iunlock(iq, XFS_ILOCK_SHARED);
	}

	if (clcount) {
		XFS_STATS_INC(xs_icluster_flushcnt);
		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
	}

out_free:
	rcu_read_unlock();
	kmem_free(ilist);
out_put:
	xfs_perag_put(pag);
	return 0;

cluster_corrupt_out:
	/*
	 * Corruption detected in the clustering loop.  Invalidate the
	 * inode buffer and shut down the filesystem.
	 */
	rcu_read_unlock();
	/*
	 * Clean up the buffer.  If it was delwri, just release it --
	 * brelse can handle it with no problems.  If not, shut down the
	 * filesystem before releasing the buffer.
	 */
	bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
	if (bufwasdelwri)
		xfs_buf_relse(bp);

	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

	if (!bufwasdelwri) {
		/*
		 * Just like incore_relse: if we have b_iodone functions,
		 * mark the buffer as an error and call them.  Otherwise
		 * mark it as stale and brelse.
		 */
		if (bp->b_iodone) {
			XFS_BUF_UNDONE(bp);
			xfs_buf_stale(bp);
			xfs_buf_ioerror(bp, EIO);
			xfs_buf_ioend(bp, 0);
		} else {
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
		}
	}

	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(iq, false);
	kmem_free(ilist);
	xfs_perag_put(pag);
	return XFS_ERROR(EFSCORRUPTED);
}

/*
 * Flush dirty inode metadata into the backing buffer.
 *
 * The caller must have the inode lock and the inode flush lock held.  The
 * inode lock will still be held upon return to the caller, and the inode
 * flush lock will be released after the inode has reached the disk.
 *
 * The caller must write out the buffer returned in *bpp and release it.
 */
int
xfs_iflush(
	struct xfs_inode	*ip,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buf		*bp;
	struct xfs_dinode	*dip;
	int			error;

	XFS_STATS_INC(xs_iflush_count);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));

	*bpp = NULL;

	xfs_iunpin_wait(ip);

	/*
	 * For stale inodes we cannot rely on the backing buffer remaining
	 * stale in cache for the remaining life of the stale inode and so
	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
	 * inodes below. We have to check this after ensuring the inode is
	 * unpinned so that it is safe to reclaim the stale inode after the
	 * flush call.
	 */
	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_ifunlock(ip);
		return 0;
	}

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this inode
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = XFS_ERROR(EIO);
		goto abort_out;
	}

	/*
	 * Get the buffer containing the on-disk inode.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
			       0);
	if (error || !bp) {
		xfs_ifunlock(ip);
		return error;
	}

	/*
	 * First flush out the inode that xfs_iflush was called with.
	 */
	error = xfs_iflush_int(ip, bp);
	if (error)
		goto corrupt_out;

	/*
	 * If the buffer is pinned then push on the log now so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);

	/*
	 * inode clustering:
	 * see if other inodes can be gathered into this write
	 */
	error = xfs_iflush_cluster(ip, bp);
	if (error)
		goto cluster_corrupt_out;

	*bpp = bp;
	return 0;

corrupt_out:
	xfs_buf_relse(bp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
	error = XFS_ERROR(EFSCORRUPTED);
abort_out:
	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(ip, false);
	return error;
}
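
/*
 * (Editorial note) xfs_iflush_int() does the actual copy of a dirty
 * in-core inode into its slot in the backing cluster buffer, after a
 * series of sanity checks on the in-core and on-disk inode cores.
 * Callers hold the ILOCK (shared or exclusive) and the flush lock.
 */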
STATIC int
xfs_iflush_int(
	struct xfs_inode	*ip,
	struct xfs_buf		*bp)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;
	struct xfs_dinode	*dip;
	struct xfs_mount	*mp = ip->i_mount;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
	ASSERT(iip != NULL && iip->ili_fields != 0);

	/* set *dip = inode's place in the buffer */
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);

	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
			       mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
				mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
			__func__, ip->i_ino, ip, ip->i_d.di_magic);
		goto corrupt_out;
	}
	if (S_ISREG(ip->i_d.di_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad regular inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	} else if (S_ISDIR(ip->i_d.di_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad directory inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	}
	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
				XFS_RANDOM_IFLUSH_5)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: detected corrupt incore inode %Lu, "
			"total extents = %d, nblocks = %Ld, ptr 0x%p",
			__func__, ip->i_ino,
			ip->i_d.di_nextents + ip->i_d.di_anextents,
			ip->i_d.di_nblocks, ip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
		goto corrupt_out;
	}

	/*
	 * Inode item log recovery for v1/v2 inodes depends on the
	 * di_flushiter count for correct sequencing.  We bump the flush
	 * iteration count so we can detect flushes which postdate a log
	 * record during recovery.  This is redundant as we now log every
	 * change and hence this can't happen, but we still need to do it
	 * to ensure backwards compatibility with old kernels that predate
	 * logging all inode changes.
	 */
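	/* v3 inodes don't use di_flushiter; recovery orders them by LSN */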
	if (ip->i_d.di_version < 3)
		ip->i_d.di_flushiter++;

	/*
	 * Copy the dirty parts of the inode into the on-disk
	 * inode.  We always copy out the core of the inode,
	 * because if the inode is dirty at all the core must
	 * be.
	 */
	xfs_dinode_to_disk(dip, &ip->i_d);

	/* Wrap, we never let the log put out DI_MAX_FLUSH */
	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
		ip->i_d.di_flushiter = 0;
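
	/*
	 * Note: the wrap above only resets the in-core counter, after
	 * xfs_dinode_to_disk() has already copied it out, so the on-disk
	 * inode can legitimately carry DI_MAX_FLUSH; log recovery treats
	 * a DI_MAX_FLUSH -> 0 transition as a wrap, not as a stale flush.
	 */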

	/*
	 * If this is really an old format inode and the superblock version
	 * has not been updated to support only new format inodes, then
	 * convert back to the old inode format.  If the superblock version
	 * has been updated, then make the conversion permanent.
	 */
	ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
	if (ip->i_d.di_version == 1) {
		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
			/*
			 * Convert it back.
			 */
			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
			dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
		} else {
			/*
			 * The superblock version has already been bumped,
			 * so just make the conversion to the new inode
			 * format permanent.
			 */
			ip->i_d.di_version = 2;
			dip->di_version = 2;
			ip->i_d.di_onlink = 0;
			dip->di_onlink = 0;
			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
			memset(&(dip->di_pad[0]), 0,
			       sizeof(dip->di_pad));
			ASSERT(xfs_get_projid(ip) == 0);
		}
	}
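
	/*
	 * With the core written, flush the data fork (and the attribute
	 * fork, if the inode has one) into the buffer, converting each
	 * fork's in-core representation to its on-disk format.
	 */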
	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
	if (XFS_IFORK_Q(ip))
		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
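	/*
	 * xfs_inobp_check() compiles away on non-DEBUG builds; on DEBUG
	 * builds it sanity-checks every inode in the buffer before the
	 * buffer is written back.
	 */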
	xfs_inobp_check(mp, bp);

	/*
	 * We've recorded everything logged in the inode, so we'd like to clear
	 * the ili_fields bits so we don't log and flush things unnecessarily.
	 * However, we can't stop logging all this information until the data
	 * we've copied into the disk buffer is written to disk.  If we did we
	 * might overwrite the copy of the inode in the log with all the data
	 * after re-logging only part of it, and in the face of a crash we
	 * wouldn't have all the data we need to recover.
	 *
	 * What we do is move the bits to the ili_last_fields field.  When
	 * logging the inode, these bits are moved back to the ili_fields field.
	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
	 * know that the information those bits represent is permanently on
	 * disk.  As long as the flush completes before the inode is logged
	 * again, then both ili_fields and ili_last_fields will be cleared.
	 *
	 * We can play with the ili_fields bits here, because the inode lock
	 * must be held exclusively in order to set bits there and the flush
	 * lock protects the ili_last_fields bits.  Set ili_logged so the flush
	 * done routine can tell whether or not to look in the AIL.  Also, store
	 * the current LSN of the inode so that we can tell whether the item has
	 * moved in the AIL from xfs_iflush_done().  In order to read the lsn we
	 * need the AIL lock, because it is a 64 bit value that cannot be read
	 * atomically.
	 */
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	iip->ili_logged = 1;

	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
				&iip->ili_item.li_lsn);

	/*
	 * Attach the function xfs_iflush_done to the inode's
	 * buffer.  This will remove the inode from the AIL
	 * and unlock the inode's flush lock when the inode is
	 * completely written to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);

	/* update the lsn in the on disk inode if required */
	if (ip->i_d.di_version == 3)
		dip->di_lsn = cpu_to_be64(iip->ili_item.li_lsn);
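
	/*
	 * Only v3 inodes carry an LSN; recovery on CRC-enabled
	 * filesystems compares it against log item LSNs to skip replay
	 * of stale changes.  It must be stamped before the checksum
	 * below is calculated, as the CRC covers the entire on-disk
	 * inode, including di_lsn.
	 */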
	/* generate the checksum. */
	xfs_dinode_calc_crc(mp, dip);

	ASSERT(bp->b_fspriv != NULL);
	ASSERT(bp->b_iodone != NULL);
	return 0;

corrupt_out:
	return XFS_ERROR(EFSCORRUPTED);
}