xfs_inode.c 62 KB

/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);

/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode *ip)
{
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}
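
/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * extent-size-aligned allocation lengths might round up against the hint,
 * where "len" is a hypothetical length in filesystem blocks:
 *
 *	xfs_extlen_t extsz = xfs_get_extsz_hint(ip);
 *
 *	if (extsz)
 *		len = roundup(len, extsz);
 */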

/*
 * This is a wrapper routine around the xfs_ilock() routine used to centralize
 * some grungy code.  It is used in places that wish to lock the inode solely
 * for reading the extents.  The reason these places can't just call
 * xfs_ilock(SHARED) is that the inode lock also guards the reading in of the
 * extents from disk for a file in b-tree format.  If the inode is in b-tree
 * format, then we need to lock the inode exclusively until the extents are read
 * in.  Locking it exclusively all the time would limit our parallelism
 * unnecessarily, though.  What we do instead is check to see if the extents
 * have been read in yet, and only lock the inode exclusively if they have not.
 *
 * The function returns a value which should be given to the corresponding
 * xfs_iunlock_map_shared().  This value is the mode in which the lock was
 * actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t *ip)
{
	uint lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t *ip,
	unsigned int lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
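
/*
 * Illustrative usage sketch (not part of the original file): the pair above
 * is meant to bracket extent lookups, with the returned mode handed back to
 * the unlock side:
 *
 *	uint lock_mode = xfs_ilock_map_shared(ip);
 *
 *	... read the extent list ...
 *
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */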

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t *ip,
	uint lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t *ip,
	uint lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}
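
/*
 * Illustrative sketch (not part of the original file): a typical
 * non-blocking caller backs off when the trylock fails, e.g.:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 *		return EAGAIN;		// hypothetical caller policy
 *	... work with the inode ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */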

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t *ip,
	uint lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t *ip,
	uint lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
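
/*
 * Illustrative sketch (not part of the original file): demotion lets a
 * caller do exclusive-phase setup and then continue under a shared lock
 * without a drop/re-take window in between:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... exclusive work ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... shared work ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */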

#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t *ip,
	uint lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
#endif

void
__xfs_iflock(
	struct xfs_inode *ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wait);
}

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t *mp,
	xfs_buf_t *bp)
{
	int i;
	int j;
	xfs_dinode_t *dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif

static void
xfs_inode_buf_verify(
	struct xfs_buf *bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	int i;
	int ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int di_ok;
		xfs_dinode_t *dip;

		dip = (struct xfs_dinode *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			XFS_DINODE_GOOD_VERSION(dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			xfs_buf_ioerror(bp, EFSCORRUPTED);
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_HIGH,
					     mp, dip);
#ifdef DEBUG
			xfs_emerg(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
			ASSERT(0);
#endif
		}
	}
	xfs_inobp_check(mp, bp);
}

static void
xfs_inode_buf_read_verify(
	struct xfs_buf *bp)
{
	xfs_inode_buf_verify(bp);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf *bp)
{
	xfs_inode_buf_verify(bp);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};
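
/*
 * Illustrative sketch (not part of the original file): readers opt in to
 * these checks by passing the ops table down to the buffer layer, which
 * runs .verify_read on IO completion; xfs_imap_to_bp() below does exactly
 * this:
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, len,
 *				   0, &bp, &xfs_inode_buf_ops);
 */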

/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
	struct xfs_mount *mp,
	struct xfs_trans *tp,
	struct xfs_imap *imap,
	struct xfs_dinode **dipp,
	struct xfs_buf **bpp,
	uint buf_flags,
	uint iget_flags)
{
	struct xfs_buf *bp;
	int error;

	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		if (error == EAGAIN) {
			ASSERT(buf_flags & XBF_TRYLOCK);
			return error;
		}

		if (error == EFSCORRUPTED &&
		    (iget_flags & XFS_IGET_UNTRUSTED))
			return XFS_ERROR(EINVAL);

		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
			__func__, error);
		return error;
	}

	*bpp = bp;
	*dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}
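
/*
 * Illustrative sketch (not part of the original file): a caller first
 * resolves the inode number to its disk location, then maps that to the
 * backing buffer and the dinode within it:
 *
 *	struct xfs_imap imap;
 *	struct xfs_dinode *dip;
 *	struct xfs_buf *bp;
 *
 *	error = xfs_imap(mp, tp, ino, &imap, 0);
 *	if (!error)
 *		error = xfs_imap_to_bp(mp, tp, &imap, &dip, &bp, 0, 0);
 */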

STATIC void
xfs_dinode_from_disk(
	xfs_icdinode_t *to,
	xfs_dinode_t *from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
	to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	to->di_gen = be32_to_cpu(from->di_gen);

	if (to->di_version == 3) {
		to->di_changecount = be64_to_cpu(from->di_changecount);
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_ino = be64_to_cpu(from->di_ino);
		to->di_lsn = be64_to_cpu(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
	}
}

void
xfs_dinode_to_disk(
	xfs_dinode_t *to,
	xfs_icdinode_t *from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}
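
/*
 * Illustrative note (not part of the original file): the two converters
 * above are inverses for a given di_version, so flushing and re-reading an
 * inode preserves the incore image:
 *
 *	xfs_dinode_to_disk(dip, &ip->i_d);
 *	xfs_dinode_from_disk(&ip->i_d, dip);
 *
 * The one asymmetry is di_flushiter, which is forced to zero on disk for
 * v3 inodes because the field is not used there.
 */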

STATIC uint
_xfs_dic2xflags(
	__uint16_t di_flags)
{
	uint flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}

uint
xfs_ip2xflags(
	xfs_inode_t *ip)
{
	xfs_icdinode_t *dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
	xfs_dinode_t *dip)
{
	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}

static bool
xfs_dinode_verify(
	struct xfs_mount *mp,
	struct xfs_inode *ip,
	struct xfs_dinode *dip)
{
	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return false;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return true;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;
	if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      offsetof(struct xfs_dinode, di_crc)))
		return false;
	if (be64_to_cpu(dip->di_ino) != ip->i_ino)
		return false;
	if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_uuid))
		return false;
	return true;
}

void
xfs_dinode_calc_crc(
	struct xfs_mount *mp,
	struct xfs_dinode *dip)
{
	__uint32_t crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      offsetof(struct xfs_dinode, di_crc));
	dip->di_crc = xfs_end_cksum(crc);
}
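
/*
 * Illustrative note (not part of the original file): the CRC covers the
 * entire on-disk inode with the di_crc field itself excluded; passing
 * offsetof(struct xfs_dinode, di_crc) tells the checksum helpers which
 * bytes to skip.  xfs_dinode_verify() above uses the same (buffer, length,
 * crc offset) triple with xfs_verify_cksum(), so the two sides stay in
 * agreement by construction.
 */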

/*
 * Read the disk inode attributes into the in-core inode structure.
 *
 * For version 5 superblocks, if we are initialising a new inode and we are not
 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
 * inode core with a random generation number.  If we are keeping inodes around,
 * we need to read the inode cluster to get the existing generation number off
 * disk.  Further, if we are using version 4 superblocks (i.e. v1/v2 inode
 * format) then log recovery is dependent on the di_flushiter field being
 * initialised from the current on-disk value and hence we must also read the
 * inode off disk.
 */
int
xfs_iread(
	xfs_mount_t *mp,
	xfs_trans_t *tp,
	xfs_inode_t *ip,
	uint iget_flags)
{
	xfs_buf_t *bp;
	xfs_dinode_t *dip;
	int error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/* shortcut IO on inode allocation if possible */
	if ((iget_flags & XFS_IGET_CREATE) &&
	    xfs_sb_version_hascrc(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		/* initialise the on-disk inode core */
		memset(&ip->i_d, 0, sizeof(ip->i_d));
		ip->i_d.di_magic = XFS_DINODE_MAGIC;
		ip->i_d.di_gen = prandom_u32();
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			ip->i_d.di_version = 3;
			ip->i_d.di_ino = ip->i_ino;
			uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid);
		} else
			ip->i_d.di_version = 2;
		return 0;
	}

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;

	/* even unallocated inodes are verified */
	if (!xfs_dinode_verify(mp, ip, dip)) {
		xfs_alert(mp, "%s: validation failed for inode %lld",
				__func__, ip->i_ino);

		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
		error = XFS_ERROR(EFSCORRUPTED);
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat_fork() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_dinode_from_disk(&ip->i_d, dip);
		error = xfs_iformat_fork(ip, dip);
		if (error)  {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		/*
		 * Partial initialisation of the in-core inode. Just the bits
		 * that xfs_ialloc won't overwrite or relies on being correct.
		 */
		ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
		ip->i_d.di_version = dip->di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

		if (dip->di_version == 3) {
			ip->i_d.di_ino = be64_to_cpu(dip->di_ino);
			uuid_copy(&ip->i_d.di_uuid, &dip->di_uuid);
		}

		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
	}

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format. We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == 1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		xfs_set_projid(ip, 0);
	}

	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the on-disk
	 * inode, because it was acquired with xfs_trans_read_buf() in
	 * xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be locking the
	 * new in-core inode before putting it in the cache where other
	 * processes can find it.  Thus we don't have to worry about the inode
	 * being changed just because we released the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
	xfs_trans_t *tp,
	xfs_inode_t *pip,
	umode_t mode,
	xfs_nlink_t nlink,
	xfs_dev_t rdev,
	prid_t prid,
	int okalloc,
	xfs_buf_t **ialloc_context,
	xfs_inode_t **ipp)
{
	struct xfs_mount *mp = tp->t_mountp;
	xfs_ino_t ino;
	xfs_inode_t *ip;
	uint flags;
	int error;
	timespec_t tv;
	int filestreams = 0;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);

	ip->i_d.di_mode = mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid();
	ip->i_d.di_gid = current_fsgid();
	xfs_set_projid(ip, prid);
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&mp->m_sb) &&
	    ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == 1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	nanotime(&tv);
	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	ip->i_d.di_atime = ip->i_d.di_mtime;
	ip->i_d.di_ctime = ip->i_d.di_mtime;

	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (ip->i_d.di_version == 3) {
		ASSERT(ip->i_d.di_ino == ino);
		ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid));
		ip->i_d.di_crc = 0;
		ip->i_d.di_changecount = 1;
		ip->i_d.di_lsn = 0;
		ip->i_d.di_flags2 = 0;
		memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2));
		ip->i_d.di_crtime = ip->i_d.di_mtime;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		/*
		 * we can't set up filestreams until after the VFS inode
		 * is set up properly.
		 */
		if (pip && xfs_inode_is_filestream(pip))
			filestreams = 1;
		/* fall through */
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint di_flags = 0;

			if (S_ISDIR(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if (S_ISREG(mode)) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_setup_inode(ip);

	/* now we have set up the vfs inode we can associate the filestream */
	if (filestreams) {
		error = xfs_filestream_associate(pip, ip);
		if (error < 0)
			return -error;
		if (!error)
			xfs_iflags_set(ip, XFS_IFILESTREAM);
	}

	*ipp = ip;
	return 0;
}
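
/*
 * Illustrative sketch (not part of the original file): the two-phase
 * protocol described above means callers loop roughly like this:
 *
 *	error = xfs_ialloc(tp, pip, mode, nlink, rdev, prid, okalloc,
 *			   &ialloc_context, &ip);
 *	if (!error && ialloc_context) {
 *		// commit tp while holding ialloc_context, start a new
 *		// transaction, then call xfs_ialloc() a second time; that
 *		// call returns the locked in-core inode in ip.
 *	}
 */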

/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.  This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents(
	struct xfs_trans **tpp,
	struct xfs_inode *ip,
	int whichfork,
	xfs_fsize_t new_size)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_trans *tp = *tpp;
	struct xfs_trans *ntp;
	xfs_bmap_free_t free_list;
	xfs_fsblock_t first_block;
	xfs_fileoff_t first_unmap_block;
	xfs_fileoff_t last_block;
	xfs_filblks_t unmap_len;
	int committed;
	int error = 0;
	int done = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (first_unmap_block == last_block)
		return 0;

	ASSERT(first_unmap_block < last_block);
	unmap_len = last_block - first_unmap_block + 1;
	while (!done) {
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(tp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(whichfork),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    &done);
		if (error)
			goto out_bmap_cancel;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (committed)
			xfs_trans_ijoin(tp, ip, 0);
		if (error)
			goto out_bmap_cancel;

		if (committed) {
			/*
			 * Mark the inode dirty so it will be logged and
			 * moved forward in the log as part of every commit.
			 */
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		ntp = xfs_trans_dup(tp);
		error = xfs_trans_commit(tp, 0);
		tp = ntp;

		xfs_trans_ijoin(tp, ip, 0);

		if (error)
			goto out;

		/*
		 * Transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup()
		 */
		xfs_log_ticket_put(tp->t_ticket);
		error = xfs_trans_reserve(tp, 0,
					XFS_ITRUNCATE_LOG_RES(mp), 0,
					XFS_TRANS_PERM_LOG_RES,
					XFS_ITRUNCATE_LOG_COUNT);
		if (error)
			goto out;
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
out_bmap_cancel:
	/*
	 * If the bunmapi call encounters an error, return to the caller where
	 * the transaction can be properly aborted.  We just need to make sure
	 * we're not holding any resources that we were not when we came in.
	 */
	xfs_bmap_cancel(&free_list);
	goto out;
}
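
/*
 * Illustrative note (not part of the original file): each pass through the
 * loop above unmaps at most XFS_ITRUNC_MAX_EXTENTS extents and then rolls
 * the transaction (xfs_trans_dup() + xfs_trans_commit() +
 * xfs_trans_reserve()) so the permanent log reservation carries forward
 * while bounding how much change any single transaction pins in the log.
 * With XFS_ITRUNC_MAX_EXTENTS == 2, truncating away 7 extents takes
 * ceil(7 / 2) = 4 passes.
 */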

/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
int
xfs_iunlink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_mount_t *mp;
	xfs_agi_t *agi;
	xfs_dinode_t *dip;
	xfs_buf_t *agibp;
	xfs_buf_t *ibp;
	xfs_agino_t agino;
	short bucket_index;
	int offset;
	int error;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	mp = tp->t_mountp;

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
	if (error)
		return error;
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to.  Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error)
			return error;

		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_imap.im_boffset +
			offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, dip);

		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);
	}

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	ASSERT(agino != 0);
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
	return 0;
}
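
/*
 * Illustrative sketch (not part of the original file): the AGI unlinked
 * table is an array of singly linked lists threaded through the on-disk
 * inodes' di_next_unlinked fields, with insertion at the head.  Adding
 * inode C to a bucket already holding inode A:
 *
 *	before:	agi_unlinked[b] -> A -> NULLAGINO
 *	after:	agi_unlinked[b] -> C -> A -> NULLAGINO
 *
 * where b = agino % XFS_AGI_UNLINKED_BUCKETS, as computed above.
 */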
  1155. /*
  1156. * Pull the on-disk inode from the AGI unlinked list.
  1157. */
  1158. STATIC int
  1159. xfs_iunlink_remove(
  1160. xfs_trans_t *tp,
  1161. xfs_inode_t *ip)
  1162. {
  1163. xfs_ino_t next_ino;
  1164. xfs_mount_t *mp;
  1165. xfs_agi_t *agi;
  1166. xfs_dinode_t *dip;
  1167. xfs_buf_t *agibp;
  1168. xfs_buf_t *ibp;
  1169. xfs_agnumber_t agno;
  1170. xfs_agino_t agino;
  1171. xfs_agino_t next_agino;
  1172. xfs_buf_t *last_ibp;
  1173. xfs_dinode_t *last_dip = NULL;
  1174. short bucket_index;
  1175. int offset, last_offset = 0;
  1176. int error;
  1177. mp = tp->t_mountp;
  1178. agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
  1179. /*
  1180. * Get the agi buffer first. It ensures lock ordering
  1181. * on the list.
  1182. */
  1183. error = xfs_read_agi(mp, tp, agno, &agibp);
  1184. if (error)
  1185. return error;
  1186. agi = XFS_BUF_TO_AGI(agibp);
  1187. /*
  1188. * Get the index into the agi hash table for the
  1189. * list this inode will go on.
  1190. */
  1191. agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
  1192. ASSERT(agino != 0);
  1193. bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
  1194. ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
  1195. ASSERT(agi->agi_unlinked[bucket_index]);
  1196. if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
  1197. /*
  1198. * We're at the head of the list. Get the inode's on-disk
  1199. * buffer to see if there is anyone after us on the list.
  1200. * Only modify our next pointer if it is not already NULLAGINO.
  1201. * This saves us the overhead of dealing with the buffer when
  1202. * there is no need to change it.
  1203. */
  1204. error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
  1205. 0, 0);
  1206. if (error) {
  1207. xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
  1208. __func__, error);
  1209. return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the bucket head pointer at the next inode.
		 */
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
		offset = offsetof(xfs_agi_t, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
		xfs_trans_log_buf(tp, agibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
	} else {
		/*
		 * We need to search the list for the inode being freed.
		 */
		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
		last_ibp = NULL;
		while (next_agino != agino) {
			struct xfs_imap	imap;

			if (last_ibp)
				xfs_trans_brelse(tp, last_ibp);

			imap.im_blkno = 0;
			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);

			error = xfs_imap(mp, tp, next_ino, &imap, 0);
			if (error) {
				xfs_warn(mp,
					"%s: xfs_imap returned error %d.",
					__func__, error);
				return error;
			}

			error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
					       &last_ibp, 0, 0);
			if (error) {
				xfs_warn(mp,
					"%s: xfs_imap_to_bp returned error %d.",
					__func__, error);
				return error;
			}

			last_offset = imap.im_boffset;
			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
			ASSERT(next_agino != NULLAGINO);
			ASSERT(next_agino != 0);
		}

		/*
		 * Now last_ibp points to the buffer previous to us on the
		 * unlinked list.  Pull us from the list.
		 */
		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
				       0, 0);
		if (error) {
			xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
				__func__, error);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);

			/* need to recalc the inode CRC if appropriate */
			xfs_dinode_calc_crc(mp, dip);

			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the previous inode on the list to the next inode.
		 */
		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
		ASSERT(next_agino != 0);
		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);

		/* need to recalc the inode CRC if appropriate */
		xfs_dinode_calc_crc(mp, last_dip);

		xfs_trans_inode_buf(tp, last_ibp);
		xfs_trans_log_buf(tp, last_ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, last_ibp);
	}
	return 0;
}
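
/*
 * An illustrative sketch (derived from the code above, not new mechanism)
 * of what removing an inode from a per-bucket AGI unlinked list does.
 * Each on-disk di_next_unlinked field holds the agino of the next entry,
 * terminated by NULLAGINO:
 *
 *	before:   agi_unlinked[b] -> A -> B -> C -> NULLAGINO
 *	remove B: A->di_next_unlinked = C;
 *	          B->di_next_unlinked = NULLAGINO;
 *	after:    agi_unlinked[b] -> A -> C -> NULLAGINO
 *
 * Removing the head inode updates agi_unlinked[b] itself rather than a
 * previous inode's di_next_unlinked, which is the first branch above.
 */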

/*
 * A big issue when freeing the inode cluster is that we _cannot_ skip any
 * inodes that are in memory - they all must be marked stale and attached to
 * the cluster buffer.
 */
STATIC int
xfs_ifree_cluster(
	xfs_inode_t	*free_ip,
	xfs_trans_t	*tp,
	xfs_ino_t	inum)
{
	xfs_mount_t		*mp = free_ip->i_mount;
	int			blks_per_cluster;
	int			nbufs;
	int			ninodes;
	int			i, j;
	xfs_daddr_t		blkno;
	xfs_buf_t		*bp;
	xfs_inode_t		*ip;
	xfs_inode_log_item_t	*iip;
	xfs_log_item_t		*lip;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
		blks_per_cluster = 1;
		ninodes = mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp);
	} else {
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
					mp->m_sb.sb_blocksize;
		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
	}
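
	/*
	 * Worked example of the geometry above (illustrative values, not
	 * normative): with 4k blocks and 512 byte inodes (sb_inopblock = 8)
	 * and an 8k inode cluster, blks_per_cluster = 2 and ninodes = 16
	 * per cluster buffer.  A 64-inode allocation chunk then spans
	 * 8 blocks, so nbufs = 8 / 2 = 4 cluster buffers cover the chunk.
	 */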

	for (j = 0; j < nbufs; j++, inum += ninodes) {
		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
					 XFS_INO_TO_AGBNO(mp, inum));

		/*
		 * We obtain and lock the backing buffer first in the process
		 * here, as we have to ensure that any dirty inode that we
		 * can't get the flush lock on is attached to the buffer.
		 * If we scan the in-memory inodes first, then buffer IO can
		 * complete before we get a lock on it, and hence we may fail
		 * to mark all the active inodes on the buffer stale.
		 */
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
				       mp->m_bsize * blks_per_cluster,
				       XBF_UNMAPPED);

		if (!bp)
			return ENOMEM;

		/*
		 * This buffer may not have been correctly initialised as we
		 * didn't read it from disk.  That's not important because we
		 * are only using it to mark the buffer as stale in the log,
		 * and to attach stale cached inodes to it.  That means it
		 * will never be dispatched for IO.  If it is, we want to know
		 * about it, and we want it to fail.  We can achieve this by
		 * adding a write verifier to the buffer.
		 */
		bp->b_ops = &xfs_inode_buf_ops;

		/*
		 * Walk the inodes already attached to the buffer and mark them
		 * stale.  These will all have the flush locks held, so an
		 * in-memory inode walk can't lock them.  By marking them all
		 * stale first, we will not attempt to lock them in the loop
		 * below as the XFS_ISTALE flag will be set.
		 */
		lip = bp->b_fspriv;
		while (lip) {
			if (lip->li_type == XFS_LI_INODE) {
				iip = (xfs_inode_log_item_t *)lip;
				ASSERT(iip->ili_logged == 1);
				lip->li_cb = xfs_istale_done;
				xfs_trans_ail_copy_lsn(mp->m_ail,
							&iip->ili_flush_lsn,
							&iip->ili_item.li_lsn);
				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
			}
			lip = lip->li_bio_list;
		}
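
		/*
		 * For context: b_fspriv heads the chain of log items attached
		 * to this buffer, linked through li_bio_list.  Re-pointing
		 * li_cb at xfs_istale_done means that when the invalidated
		 * buffer's callbacks run, each of these inodes has its flush
		 * aborted and is pulled from the AIL as stale instead of
		 * being written back.
		 */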

		/*
		 * For each inode in memory attempt to add it to the inode
		 * buffer and set it up for being staled on buffer IO
		 * completion.  This is safe as we've locked out tail pushing
		 * and flushing by locking the buffer.
		 *
		 * We have already marked every inode that was part of a
		 * transaction stale above, which means there is no point in
		 * even trying to lock them.
		 */
		for (i = 0; i < ninodes; i++) {
retry:
			rcu_read_lock();
			ip = radix_tree_lookup(&pag->pag_ici_root,
					XFS_INO_TO_AGINO(mp, (inum + i)));

			/* Inode not in memory, nothing to do */
			if (!ip) {
				rcu_read_unlock();
				continue;
			}

			/*
			 * because this is an RCU protected lookup, we could
			 * find a recently freed or even reallocated inode
			 * during the lookup.  We need to check under the
			 * i_flags_lock for a valid inode here.  Skip it if it
			 * is not valid, the wrong inode or stale.
			 */
			spin_lock(&ip->i_flags_lock);
			if (ip->i_ino != inum + i ||
			    __xfs_iflags_test(ip, XFS_ISTALE)) {
				spin_unlock(&ip->i_flags_lock);
				rcu_read_unlock();
				continue;
			}
			spin_unlock(&ip->i_flags_lock);

			/*
			 * Don't try to lock/unlock the current inode, but we
			 * _cannot_ skip the other inodes that we did not find
			 * in the list attached to the buffer and are not
			 * already marked stale.  If we can't lock it, back off
			 * and retry.
			 */
			if (ip != free_ip &&
			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
				rcu_read_unlock();
				delay(1);
				goto retry;
			}
			rcu_read_unlock();

			xfs_iflock(ip);
			xfs_iflags_set(ip, XFS_ISTALE);

			/*
			 * we don't need to attach clean inodes or those only
			 * with unlogged changes (which we throw away, anyway).
			 */
			iip = ip->i_itemp;
			if (!iip || xfs_inode_clean(ip)) {
				ASSERT(ip != free_ip);
				xfs_ifunlock(ip);
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				continue;
			}

			iip->ili_last_fields = iip->ili_fields;
			iip->ili_fields = 0;
			iip->ili_logged = 1;
			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
						&iip->ili_item.li_lsn);

			xfs_buf_attach_iodone(bp, xfs_istale_done,
					      &iip->ili_item);

			if (ip != free_ip)
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		xfs_trans_stale_inode_buf(tp, bp);
		xfs_trans_binval(tp, bp);
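
		/*
		 * Roughly, the pairing above: xfs_trans_stale_inode_buf()
		 * flags this as a stale inode buffer so the attached
		 * xfs_istale_done callbacks are run at transaction
		 * completion, while xfs_trans_binval() invalidates the
		 * buffer in the log so recovery cancels any older copies
		 * and the buffer itself is never written back.
		 */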
	}

	xfs_perag_put(pag);
	return 0;
}

/*
 * This is called to return an inode to the inode free list.
 * The inode should already be truncated to 0 length and have
 * no pages associated with it.  This routine also assumes that
 * the inode is already a part of the transaction.
 *
 * The on-disk copy of the inode will have been added to the list
 * of unlinked inodes in the AGI.  We need to remove the inode from
 * that list atomically with respect to freeing it here.
 */
int
xfs_ifree(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_bmap_free_t	*flist)
{
	int			error;
	int			delete;
	xfs_ino_t		first_ino;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_nextents == 0);
	ASSERT(ip->i_d.di_anextents == 0);
	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
	ASSERT(ip->i_d.di_nblocks == 0);

	/*
	 * Pull the on-disk inode from the AGI unlinked list.
	 */
	error = xfs_iunlink_remove(tp, ip);
	if (error)
		return error;

	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
	if (error)
		return error;

	ip->i_d.di_mode = 0;		/* mark incore inode as free */
	ip->i_d.di_flags = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	/*
	 * Bump the generation count so no one will be confused
	 * by reincarnations of this inode.
	 */
	ip->i_d.di_gen++;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (delete)
		error = xfs_ifree_cluster(ip, tp, first_ino);

	return error;
}
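
/*
 * A minimal caller sketch (illustrative only, not a verbatim caller):
 * the inode must already be truncated, joined to the transaction, and
 * locked exclusively, with a block free list initialised for
 * xfs_difree() to use:
 *
 *	xfs_bmap_init(&flist, &first_block);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_ifree(tp, ip, &flist);
 */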

/*
 * This is called to unpin an inode.  The caller must have the inode locked
 * in at least shared mode so that the buffer cannot be subsequently pinned
 * once someone is waiting for it to be unpinned.
 */
static void
xfs_iunpin(
	struct xfs_inode	*ip)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));

	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);

	/* Give the log a push to start the unpinning I/O */
	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
}

static void
__xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);

	xfs_iunpin(ip);

	do {
		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_ipincount(ip))
			io_schedule();
	} while (xfs_ipincount(ip));
	finish_wait(wq, &wait.wait);
}

void
xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	if (xfs_ipincount(ip))
		__xfs_iunpin_wait(ip);
}
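
/*
 * The wait above is the standard wait-on-bit pattern: the waiter sleeps
 * on the waitqueue hashed from __XFS_IPINNED_BIT while the pin count is
 * non-zero.  The matching wake-up side lives in the inode log item unpin
 * code (xfs_inode_item.c), which issues a wake_up_bit() on the same flag
 * word once the pin count drops to zero.
 */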

STATIC int
xfs_iflush_cluster(
	xfs_inode_t	*ip,
	xfs_buf_t	*bp)
{
	xfs_mount_t		*mp = ip->i_mount;
	struct xfs_perag	*pag;
	unsigned long		first_index, mask;
	unsigned long		inodes_per_cluster;
	int			ilist_size;
	xfs_inode_t		**ilist;
	xfs_inode_t		*iq;
	int			nr_found;
	int			clcount = 0;
	int			bufwasdelwri;
	int			i;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));

	inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
	if (!ilist)
		goto out_put;

	mask = ~(inodes_per_cluster - 1);
	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
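	/*
	 * Example (illustrative numbers): with an 8k inode cluster and
	 * 512 byte inodes (sb_inodelog = 9), inodes_per_cluster = 16 and
	 * mask = ~0xf, so first_index is ip's agino rounded down to the
	 * first inode of its cluster.
	 */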
	rcu_read_lock();
	/* really need a gang lookup range call here */
	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
					first_index, inodes_per_cluster);
	if (nr_found == 0)
		goto out_free;

	for (i = 0; i < nr_found; i++) {
		iq = ilist[i];
		if (iq == ip)
			continue;

		/*
		 * because this is an RCU protected lookup, we could find a
		 * recently freed or even reallocated inode during the lookup.
		 * We need to check under the i_flags_lock for a valid inode
		 * here.  Skip it if it is not valid or the wrong inode.
		 * Note that the lock and checks must apply to iq, the inode
		 * being examined, not to ip.
		 */
		spin_lock(&iq->i_flags_lock);
		if (!iq->i_ino ||
		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
			spin_unlock(&iq->i_flags_lock);
			continue;
		}
		spin_unlock(&iq->i_flags_lock);

		/*
		 * Do an un-protected check to see if the inode is dirty and
		 * is a candidate for flushing.  These checks will be repeated
		 * later after the appropriate locks are acquired.
		 */
		if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
			continue;

		/*
		 * Try to get locks.  If any are unavailable or it is pinned,
		 * then this inode cannot be flushed and is skipped.
		 */
		if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
			continue;
		if (!xfs_iflock_nowait(iq)) {
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}
		if (xfs_ipincount(iq)) {
			xfs_ifunlock(iq);
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * arriving here means that this inode can be flushed.  First
		 * re-check that it's dirty before flushing.
		 */
		if (!xfs_inode_clean(iq)) {
			int	error;

			error = xfs_iflush_int(iq, bp);
			if (error) {
				xfs_iunlock(iq, XFS_ILOCK_SHARED);
				goto cluster_corrupt_out;
			}
			clcount++;
		} else {
			xfs_ifunlock(iq);
		}
		xfs_iunlock(iq, XFS_ILOCK_SHARED);
	}

	if (clcount) {
		XFS_STATS_INC(xs_icluster_flushcnt);
		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
	}

out_free:
	rcu_read_unlock();
	kmem_free(ilist);
out_put:
	xfs_perag_put(pag);
	return 0;

cluster_corrupt_out:
	/*
	 * Corruption detected in the clustering loop.  Invalidate the
	 * inode buffer and shut down the filesystem.
	 */
	rcu_read_unlock();
	/*
	 * Clean up the buffer.  If it was delwri, just release it --
	 * brelse can handle it with no problems.  If not, shut down the
	 * filesystem before releasing the buffer.
	 */
	bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
	if (bufwasdelwri)
		xfs_buf_relse(bp);

	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

	if (!bufwasdelwri) {
		/*
		 * Just like incore_relse: if we have b_iodone functions,
		 * mark the buffer as an error and call them.  Otherwise
		 * mark it as stale and brelse.
		 */
		if (bp->b_iodone) {
			XFS_BUF_UNDONE(bp);
			xfs_buf_stale(bp);
			xfs_buf_ioerror(bp, EIO);
			xfs_buf_ioend(bp, 0);
		} else {
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
		}
	}

	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(iq, false);
	kmem_free(ilist);
	xfs_perag_put(pag);
	return XFS_ERROR(EFSCORRUPTED);
}

/*
 * Flush dirty inode metadata into the backing buffer.
 *
 * The caller must have the inode lock and the inode flush lock held.  The
 * inode lock will still be held upon return to the caller, and the inode
 * flush lock will be released after the inode has reached the disk.
 *
 * The caller must write out the buffer returned in *bpp and release it.
 */
int
xfs_iflush(
	struct xfs_inode	*ip,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buf		*bp;
	struct xfs_dinode	*dip;
	int			error;

	XFS_STATS_INC(xs_iflush_count);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));

	*bpp = NULL;

	xfs_iunpin_wait(ip);

	/*
	 * For stale inodes we cannot rely on the backing buffer remaining
	 * stale in cache for the remaining life of the stale inode and so
	 * xfs_imap_to_bp() below may give us a buffer that no longer contains
	 * inodes.  We have to check this after ensuring the inode is
	 * unpinned so that it is safe to reclaim the stale inode after the
	 * flush call.
	 */
	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_ifunlock(ip);
		return 0;
	}

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly.  If that's the case we must not write this inode
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = XFS_ERROR(EIO);
		goto abort_out;
	}

	/*
	 * Get the buffer containing the on-disk inode.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
			       0);
	if (error || !bp) {
		xfs_ifunlock(ip);
		return error;
	}

	/*
	 * First flush out the inode that xfs_iflush was called with.
	 */
	error = xfs_iflush_int(ip, bp);
	if (error)
		goto corrupt_out;

	/*
	 * If the buffer is pinned then push on the log now so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);

	/*
	 * inode clustering:
	 * see if other inodes can be gathered into this write
	 */
	error = xfs_iflush_cluster(ip, bp);
	if (error)
		goto cluster_corrupt_out;

	*bpp = bp;
	return 0;

corrupt_out:
	xfs_buf_relse(bp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
	error = XFS_ERROR(EFSCORRUPTED);
abort_out:
	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(ip, false);
	return error;
}
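
/*
 * A sketch of the caller's side of the contract above (illustrative,
 * not a verbatim caller): on success the returned buffer must be
 * submitted for write and released, e.g.
 *
 *	error = xfs_iflush(ip, &bp);
 *	if (!error) {
 *		error = xfs_bwrite(bp);
 *		xfs_buf_relse(bp);
 *	}
 */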

STATIC int
xfs_iflush_int(
	struct xfs_inode	*ip,
	struct xfs_buf		*bp)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;
	struct xfs_dinode	*dip;
	struct xfs_mount	*mp = ip->i_mount;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
	ASSERT(iip != NULL && iip->ili_fields != 0);

	/* set *dip = inode's place in the buffer */
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);

	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
			   mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
			   mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
			__func__, ip->i_ino, ip, ip->i_d.di_magic);
		goto corrupt_out;
	}
	if (S_ISREG(ip->i_d.di_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad regular inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	} else if (S_ISDIR(ip->i_d.di_mode)) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad directory inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	}
	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
				XFS_RANDOM_IFLUSH_5)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: detected corrupt incore inode %Lu, "
			"total extents = %d, nblocks = %Ld, ptr 0x%p",
			__func__, ip->i_ino,
			ip->i_d.di_nextents + ip->i_d.di_anextents,
			ip->i_d.di_nblocks, ip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
		goto corrupt_out;
	}

	/*
	 * Inode item log recovery for v1/v2 inodes is dependent on the
	 * di_flushiter count for correct sequencing.  We bump the flush
	 * iteration count so we can detect flushes which postdate a log record
	 * during recovery.  This is redundant as we now log every change and
	 * hence this can't happen, but we still need to do it to ensure
	 * backwards compatibility with old kernels that predate logging all
	 * inode changes.
	 */
	if (ip->i_d.di_version < 3)
		ip->i_d.di_flushiter++;

	/*
	 * Copy the dirty parts of the inode into the on-disk
	 * inode.  We always copy out the core of the inode,
	 * because if the inode is dirty at all the core must
	 * be.
	 */
	xfs_dinode_to_disk(dip, &ip->i_d);

	/* Wrap, we never let the log put out DI_MAX_FLUSH */
	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
		ip->i_d.di_flushiter = 0;

	/*
	 * If this is really an old format inode and the superblock version
	 * has not been updated to support only new format inodes, then
	 * convert back to the old inode format.  If the superblock version
	 * has been updated, then make the conversion permanent.
	 */
	ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
	if (ip->i_d.di_version == 1) {
		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
			/*
			 * Convert it back.
			 */
			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
			dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
		} else {
			/*
			 * The superblock version has already been bumped,
			 * so just make the conversion to the new inode
			 * format permanent.
			 */
			ip->i_d.di_version = 2;
			dip->di_version = 2;
			ip->i_d.di_onlink = 0;
			dip->di_onlink = 0;
			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
			memset(&(dip->di_pad[0]), 0, sizeof(dip->di_pad));
			ASSERT(xfs_get_projid(ip) == 0);
		}
	}

	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
	if (XFS_IFORK_Q(ip))
		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
	xfs_inobp_check(mp, bp);

	/*
	 * We've recorded everything logged in the inode, so we'd like to clear
	 * the ili_fields bits so we don't log and flush things unnecessarily.
	 * However, we can't stop logging all this information until the data
	 * we've copied into the disk buffer is written to disk.  If we did we
	 * might overwrite the copy of the inode in the log with all the data
	 * after re-logging only part of it, and in the face of a crash we
	 * wouldn't have all the data we need to recover.
	 *
	 * What we do is move the bits to the ili_last_fields field.  When
	 * logging the inode, these bits are moved back to the ili_fields field.
	 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
	 * know that the information those bits represent is permanently on
	 * disk.  As long as the flush completes before the inode is logged
	 * again, then both ili_fields and ili_last_fields will be cleared.
	 *
	 * We can play with the ili_fields bits here, because the inode lock
	 * must be held exclusively in order to set bits there and the flush
	 * lock protects the ili_last_fields bits.  Set ili_logged so the flush
	 * done routine can tell whether or not to look in the AIL.  Also, store
	 * the current LSN of the inode so that we can tell whether the item has
	 * moved in the AIL from xfs_iflush_done().  In order to read the lsn we
	 * need the AIL lock, because it is a 64 bit value that cannot be read
	 * atomically.
	 */
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	iip->ili_logged = 1;

	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
				&iip->ili_item.li_lsn);

	/*
	 * Attach the function xfs_iflush_done to the inode's
	 * buffer.  This will remove the inode from the AIL
	 * and unlock the inode's flush lock when the inode is
	 * completely written to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);

	/* update the lsn in the on disk inode if required */
	if (ip->i_d.di_version == 3)
		dip->di_lsn = cpu_to_be64(iip->ili_item.li_lsn);

	/* generate the checksum. */
	xfs_dinode_calc_crc(mp, dip);

	ASSERT(bp->b_fspriv != NULL);
	ASSERT(bp->b_iodone != NULL);
	return 0;

corrupt_out:
	return XFS_ERROR(EFSCORRUPTED);
}
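
/*
 * Compact timeline of the dirty-bit handoff described inside
 * xfs_iflush_int() above (a summary of that comment, not new mechanism):
 *
 *	log the inode:		ili_fields |= <dirty flags>
 *	xfs_iflush_int():	ili_last_fields = ili_fields; ili_fields = 0
 *	xfs_iflush_done():	ili_last_fields = 0 (copy is on disk)
 *
 * If the inode is relogged before the flush completes, the saved bits
 * move back into ili_fields so nothing is lost across a crash.
 */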

/*
 * Test whether it is appropriate to check an inode for and free post-EOF
 * blocks.  The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(ip->i_d.di_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and no delalloc blocks will
	 * not have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VN_CACHED(VFS_I(ip)) == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}
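
/*
 * Typical usage sketch (illustrative; assumes the xfs_free_eofblocks()
 * helper with its (mp, ip, need_iolock) signature from this codebase):
 *
 *	if (xfs_can_free_eofblocks(ip, false))
 *		error = xfs_free_eofblocks(mp, ip, true);
 */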