xfs_inode.c

  1. /*
  2. * Copyright (c) 2000-2006 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include <linux/log2.h>
  19. #include "xfs.h"
  20. #include "xfs_fs.h"
  21. #include "xfs_format.h"
  22. #include "xfs_log.h"
  23. #include "xfs_inum.h"
  24. #include "xfs_trans.h"
  25. #include "xfs_trans_priv.h"
  26. #include "xfs_sb.h"
  27. #include "xfs_ag.h"
  28. #include "xfs_mount.h"
  29. #include "xfs_bmap_btree.h"
  30. #include "xfs_alloc_btree.h"
  31. #include "xfs_ialloc_btree.h"
  32. #include "xfs_attr_sf.h"
  33. #include "xfs_dinode.h"
  34. #include "xfs_inode.h"
  35. #include "xfs_buf_item.h"
  36. #include "xfs_inode_item.h"
  37. #include "xfs_btree.h"
  38. #include "xfs_alloc.h"
  39. #include "xfs_ialloc.h"
  40. #include "xfs_bmap.h"
  41. #include "xfs_error.h"
  42. #include "xfs_utils.h"
  43. #include "xfs_quota.h"
  44. #include "xfs_filestream.h"
  45. #include "xfs_vnodeops.h"
  46. #include "xfs_cksum.h"
  47. #include "xfs_trace.h"
  48. #include "xfs_icache.h"
  49. kmem_zone_t *xfs_inode_zone;
  50. /*
  51. * Used in xfs_itruncate_extents(). This is the maximum number of extents
  52. * freed from a file in a single transaction.
  53. */
  54. #define XFS_ITRUNC_MAX_EXTENTS 2
  55. STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
  56. /*
  57. * helper function to extract extent size hint from inode
  58. */
  59. xfs_extlen_t
  60. xfs_get_extsz_hint(
  61. struct xfs_inode *ip)
  62. {
  63. if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
  64. return ip->i_d.di_extsize;
  65. if (XFS_IS_REALTIME_INODE(ip))
  66. return ip->i_mount->m_sb.sb_rextsize;
  67. return 0;
  68. }
  69. /*
  70. * This is a wrapper routine around the xfs_ilock() routine used to centralize
  71. * some grungy code. It is used in places that wish to lock the inode solely
  72. * for reading the extents. The reason these places can't just call
  73. * xfs_ilock(SHARED) is that the inode lock also guards the bringing in of the
  74. * extents from disk for a file in b-tree format. If the inode is in b-tree
  75. * format, then we need to lock the inode exclusively until the extents are read
  76. * in. Locking it exclusively all the time would limit our parallelism
  77. * unnecessarily, though. What we do instead is check to see if the extents
  78. * have been read in yet, and only lock the inode exclusively if they have not.
  79. *
  80. * The function returns a value which should be given to the corresponding
  81. * xfs_iunlock_map_shared(). This value is the mode in which the lock was
  82. * actually taken.
  83. */
  84. uint
  85. xfs_ilock_map_shared(
  86. xfs_inode_t *ip)
  87. {
  88. uint lock_mode;
  89. if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
  90. ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
  91. lock_mode = XFS_ILOCK_EXCL;
  92. } else {
  93. lock_mode = XFS_ILOCK_SHARED;
  94. }
  95. xfs_ilock(ip, lock_mode);
  96. return lock_mode;
  97. }
  98. /*
  99. * This is simply the unlock routine to go with xfs_ilock_map_shared().
  100. * All it does is call xfs_iunlock() with the given lock_mode.
  101. */
  102. void
  103. xfs_iunlock_map_shared(
  104. xfs_inode_t *ip,
  105. unsigned int lock_mode)
  106. {
  107. xfs_iunlock(ip, lock_mode);
  108. }
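/*
 * Illustrative usage sketch, not part of the original file: a reader that
 * only needs the extent list takes the lock via xfs_ilock_map_shared() and
 * must hand the returned mode back to xfs_iunlock_map_shared(), because the
 * mode actually taken may have been exclusive.  xfs_example_read_extents()
 * is hypothetical.
 */
STATIC void
xfs_example_read_extents(
	xfs_inode_t	*ip)
{
	uint		lock_mode;

	lock_mode = xfs_ilock_map_shared(ip);
	/* ... walk the in-core extent list of ip here ... */
	xfs_iunlock_map_shared(ip, lock_mode);
}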
  109. /*
  110. * The xfs inode contains 2 locks: a multi-reader lock called the
  111. * i_iolock and a multi-reader lock called the i_lock. This routine
  112. * allows either or both of the locks to be obtained.
  113. *
  114. * The 2 locks should always be ordered so that the IO lock is
  115. * obtained first in order to prevent deadlock.
  116. *
  117. * ip -- the inode being locked
  118. * lock_flags -- this parameter indicates the inode's locks
  119. * to be locked. It can be:
  120. * XFS_IOLOCK_SHARED,
  121. * XFS_IOLOCK_EXCL,
  122. * XFS_ILOCK_SHARED,
  123. * XFS_ILOCK_EXCL,
  124. * XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
  125. * XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
  126. * XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
  127. * XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
  128. */
  129. void
  130. xfs_ilock(
  131. xfs_inode_t *ip,
  132. uint lock_flags)
  133. {
  134. trace_xfs_ilock(ip, lock_flags, _RET_IP_);
  135. /*
  136. * You can't set both SHARED and EXCL for the same lock,
  137. * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
  138. * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
  139. */
  140. ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
  141. (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
  142. ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
  143. (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
  144. ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
  145. if (lock_flags & XFS_IOLOCK_EXCL)
  146. mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
  147. else if (lock_flags & XFS_IOLOCK_SHARED)
  148. mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
  149. if (lock_flags & XFS_ILOCK_EXCL)
  150. mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
  151. else if (lock_flags & XFS_ILOCK_SHARED)
  152. mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
  153. }
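/*
 * Illustrative usage sketch, not part of the original file: taking the IO
 * lock and the inode lock in a single call (IO lock first, as the ordering
 * rule above requires) and dropping them with the same flags.
 * xfs_example_lock_both() is hypothetical.
 */
STATIC void
xfs_example_lock_both(
	xfs_inode_t	*ip)
{
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	/* ... update both file data ranges and inode metadata ... */
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
}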
  154. /*
  155. * This is just like xfs_ilock(), except that the caller
  156. * is guaranteed not to sleep. It returns 1 if it gets
  157. * the requested locks and 0 otherwise. If the IO lock is
  158. * obtained but the inode lock cannot be, then the IO lock
  159. * is dropped before returning.
  160. *
  161. * ip -- the inode being locked
  162. * lock_flags -- this parameter indicates the inode's locks to be
  163. * locked. See the comment for xfs_ilock() for a list
  164. * of valid values.
  165. */
  166. int
  167. xfs_ilock_nowait(
  168. xfs_inode_t *ip,
  169. uint lock_flags)
  170. {
  171. trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
  172. /*
  173. * You can't set both SHARED and EXCL for the same lock,
  174. * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
  175. * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
  176. */
  177. ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
  178. (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
  179. ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
  180. (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
  181. ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
  182. if (lock_flags & XFS_IOLOCK_EXCL) {
  183. if (!mrtryupdate(&ip->i_iolock))
  184. goto out;
  185. } else if (lock_flags & XFS_IOLOCK_SHARED) {
  186. if (!mrtryaccess(&ip->i_iolock))
  187. goto out;
  188. }
  189. if (lock_flags & XFS_ILOCK_EXCL) {
  190. if (!mrtryupdate(&ip->i_lock))
  191. goto out_undo_iolock;
  192. } else if (lock_flags & XFS_ILOCK_SHARED) {
  193. if (!mrtryaccess(&ip->i_lock))
  194. goto out_undo_iolock;
  195. }
  196. return 1;
  197. out_undo_iolock:
  198. if (lock_flags & XFS_IOLOCK_EXCL)
  199. mrunlock_excl(&ip->i_iolock);
  200. else if (lock_flags & XFS_IOLOCK_SHARED)
  201. mrunlock_shared(&ip->i_iolock);
  202. out:
  203. return 0;
  204. }
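/*
 * Illustrative usage sketch, not part of the original file: a caller that
 * must not sleep backs off when xfs_ilock_nowait() returns 0.
 * xfs_example_trylock() and its EAGAIN convention are assumptions.
 */
STATIC int
xfs_example_trylock(
	xfs_inode_t	*ip)
{
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
		return EAGAIN;		/* try again later */
	/* ... read inode fields under the shared lock ... */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return 0;
}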
  205. /*
  206. * xfs_iunlock() is used to drop the inode locks acquired with
  207. * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
  208. * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
  209. * that we know which locks to drop.
  210. *
  211. * ip -- the inode being unlocked
  212. * lock_flags -- this parameter indicates the inode's locks to be
  213. * unlocked. See the comment for xfs_ilock() for a list
  214. * of valid values for this parameter.
  215. *
  216. */
  217. void
  218. xfs_iunlock(
  219. xfs_inode_t *ip,
  220. uint lock_flags)
  221. {
  222. /*
  223. * You can't set both SHARED and EXCL for the same lock,
  224. * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
  225. * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
  226. */
  227. ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
  228. (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
  229. ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
  230. (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
  231. ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
  232. ASSERT(lock_flags != 0);
  233. if (lock_flags & XFS_IOLOCK_EXCL)
  234. mrunlock_excl(&ip->i_iolock);
  235. else if (lock_flags & XFS_IOLOCK_SHARED)
  236. mrunlock_shared(&ip->i_iolock);
  237. if (lock_flags & XFS_ILOCK_EXCL)
  238. mrunlock_excl(&ip->i_lock);
  239. else if (lock_flags & XFS_ILOCK_SHARED)
  240. mrunlock_shared(&ip->i_lock);
  241. trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
  242. }
  243. /*
  244. * Give up write locks. The i/o lock cannot be held nested
  245. * if it is being demoted.
  246. */
  247. void
  248. xfs_ilock_demote(
  249. xfs_inode_t *ip,
  250. uint lock_flags)
  251. {
  252. ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
  253. ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
  254. if (lock_flags & XFS_ILOCK_EXCL)
  255. mrdemote(&ip->i_lock);
  256. if (lock_flags & XFS_IOLOCK_EXCL)
  257. mrdemote(&ip->i_iolock);
  258. trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
  259. }
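/*
 * Illustrative usage sketch, not part of the original file: after the
 * exclusive phase of an operation is complete, the write lock can be
 * demoted so other readers may proceed while this thread keeps a shared
 * hold.  xfs_example_demote() is hypothetical.
 */
STATIC void
xfs_example_demote(
	xfs_inode_t	*ip)
{
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	/* ... exclusive-only setup work ... */
	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
	/* ... read-only work under the shared lock ... */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
}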
  260. #if defined(DEBUG) || defined(XFS_WARN)
  261. int
  262. xfs_isilocked(
  263. xfs_inode_t *ip,
  264. uint lock_flags)
  265. {
  266. if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
  267. if (!(lock_flags & XFS_ILOCK_SHARED))
  268. return !!ip->i_lock.mr_writer;
  269. return rwsem_is_locked(&ip->i_lock.mr_lock);
  270. }
  271. if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
  272. if (!(lock_flags & XFS_IOLOCK_SHARED))
  273. return !!ip->i_iolock.mr_writer;
  274. return rwsem_is_locked(&ip->i_iolock.mr_lock);
  275. }
  276. ASSERT(0);
  277. return 0;
  278. }
  279. #endif
  280. void
  281. __xfs_iflock(
  282. struct xfs_inode *ip)
  283. {
  284. wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
  285. DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
  286. do {
  287. prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
  288. if (xfs_isiflocked(ip))
  289. io_schedule();
  290. } while (!xfs_iflock_nowait(ip));
  291. finish_wait(wq, &wait.wait);
  292. }
  293. STATIC uint
  294. _xfs_dic2xflags(
  295. __uint16_t di_flags)
  296. {
  297. uint flags = 0;
  298. if (di_flags & XFS_DIFLAG_ANY) {
  299. if (di_flags & XFS_DIFLAG_REALTIME)
  300. flags |= XFS_XFLAG_REALTIME;
  301. if (di_flags & XFS_DIFLAG_PREALLOC)
  302. flags |= XFS_XFLAG_PREALLOC;
  303. if (di_flags & XFS_DIFLAG_IMMUTABLE)
  304. flags |= XFS_XFLAG_IMMUTABLE;
  305. if (di_flags & XFS_DIFLAG_APPEND)
  306. flags |= XFS_XFLAG_APPEND;
  307. if (di_flags & XFS_DIFLAG_SYNC)
  308. flags |= XFS_XFLAG_SYNC;
  309. if (di_flags & XFS_DIFLAG_NOATIME)
  310. flags |= XFS_XFLAG_NOATIME;
  311. if (di_flags & XFS_DIFLAG_NODUMP)
  312. flags |= XFS_XFLAG_NODUMP;
  313. if (di_flags & XFS_DIFLAG_RTINHERIT)
  314. flags |= XFS_XFLAG_RTINHERIT;
  315. if (di_flags & XFS_DIFLAG_PROJINHERIT)
  316. flags |= XFS_XFLAG_PROJINHERIT;
  317. if (di_flags & XFS_DIFLAG_NOSYMLINKS)
  318. flags |= XFS_XFLAG_NOSYMLINKS;
  319. if (di_flags & XFS_DIFLAG_EXTSIZE)
  320. flags |= XFS_XFLAG_EXTSIZE;
  321. if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
  322. flags |= XFS_XFLAG_EXTSZINHERIT;
  323. if (di_flags & XFS_DIFLAG_NODEFRAG)
  324. flags |= XFS_XFLAG_NODEFRAG;
  325. if (di_flags & XFS_DIFLAG_FILESTREAM)
  326. flags |= XFS_XFLAG_FILESTREAM;
  327. }
  328. return flags;
  329. }
  330. uint
  331. xfs_ip2xflags(
  332. xfs_inode_t *ip)
  333. {
  334. xfs_icdinode_t *dic = &ip->i_d;
  335. return _xfs_dic2xflags(dic->di_flags) |
  336. (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
  337. }
  338. uint
  339. xfs_dic2xflags(
  340. xfs_dinode_t *dip)
  341. {
  342. return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
  343. (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
  344. }
  345. /*
  346. * Allocate an inode on disk and return a copy of its in-core version.
  347. * The in-core inode is locked exclusively. Set mode, nlink, and rdev
  348. * appropriately within the inode. The uid and gid for the inode are
  349. * set according to the contents of the given cred structure.
  350. *
  351. * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
  352. * has a free inode available, call xfs_iget() to obtain the in-core
  353. * version of the allocated inode. Finally, fill in the inode and
  354. * log its initial contents. In this case, ialloc_context would be
  355. * set to NULL.
  356. *
  357. * If xfs_dialloc() does not have an available inode, it will replenish
  358. * its supply by doing an allocation. Since we can only do one
  359. * allocation within a transaction without deadlocks, we must commit
  360. * the current transaction before returning the inode itself.
  361. * In this case, therefore, we will set ialloc_context and return.
  362. * The caller should then commit the current transaction, start a new
  363. * transaction, and call xfs_ialloc() again to actually get the inode.
  364. *
  365. * To ensure that some other process does not grab the inode that
  366. * was allocated during the first call to xfs_ialloc(), this routine
  367. * also returns the [locked] bp pointing to the head of the freelist
  368. * as ialloc_context. The caller should hold this buffer across
  369. * the commit and pass it back into this routine on the second call.
  370. *
  371. * If we are allocating quota inodes, we do not have a parent inode
  372. * to attach to or associate with (i.e. pip == NULL) because they
  373. * are not linked into the directory structure - they are attached
  374. * directly to the superblock - and so have no parent.
  375. */
  376. int
  377. xfs_ialloc(
  378. xfs_trans_t *tp,
  379. xfs_inode_t *pip,
  380. umode_t mode,
  381. xfs_nlink_t nlink,
  382. xfs_dev_t rdev,
  383. prid_t prid,
  384. int okalloc,
  385. xfs_buf_t **ialloc_context,
  386. xfs_inode_t **ipp)
  387. {
  388. struct xfs_mount *mp = tp->t_mountp;
  389. xfs_ino_t ino;
  390. xfs_inode_t *ip;
  391. uint flags;
  392. int error;
  393. timespec_t tv;
  394. int filestreams = 0;
  395. /*
  396. * Call the space management code to pick
  397. * the on-disk inode to be allocated.
  398. */
  399. error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
  400. ialloc_context, &ino);
  401. if (error)
  402. return error;
  403. if (*ialloc_context || ino == NULLFSINO) {
  404. *ipp = NULL;
  405. return 0;
  406. }
  407. ASSERT(*ialloc_context == NULL);
  408. /*
  409. * Get the in-core inode with the lock held exclusively.
  410. * This is because we're setting fields here we need
  411. * to prevent others from looking at until we're done.
  412. */
  413. error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
  414. XFS_ILOCK_EXCL, &ip);
  415. if (error)
  416. return error;
  417. ASSERT(ip != NULL);
  418. ip->i_d.di_mode = mode;
  419. ip->i_d.di_onlink = 0;
  420. ip->i_d.di_nlink = nlink;
  421. ASSERT(ip->i_d.di_nlink == nlink);
  422. ip->i_d.di_uid = current_fsuid();
  423. ip->i_d.di_gid = current_fsgid();
  424. xfs_set_projid(ip, prid);
  425. memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
  426. /*
  427. * If the superblock version is up to where we support new format
  428. * inodes and this is currently an old format inode, then change
  429. * the inode version number now. This way we only do the conversion
  430. * here rather than here and in the flush/logging code.
  431. */
  432. if (xfs_sb_version_hasnlink(&mp->m_sb) &&
  433. ip->i_d.di_version == 1) {
  434. ip->i_d.di_version = 2;
  435. /*
  436. * We've already zeroed the old link count, the projid field,
  437. * and the pad field.
  438. */
  439. }
  440. /*
  441. * Project ids won't be stored on disk if we are using a version 1 inode.
  442. */
  443. if ((prid != 0) && (ip->i_d.di_version == 1))
  444. xfs_bump_ino_vers2(tp, ip);
  445. if (pip && XFS_INHERIT_GID(pip)) {
  446. ip->i_d.di_gid = pip->i_d.di_gid;
  447. if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
  448. ip->i_d.di_mode |= S_ISGID;
  449. }
  450. }
  451. /*
  452. * If the group ID of the new file does not match the effective group
  453. * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
  454. * (and only if the irix_sgid_inherit compatibility variable is set).
  455. */
  456. if ((irix_sgid_inherit) &&
  457. (ip->i_d.di_mode & S_ISGID) &&
  458. (!in_group_p((gid_t)ip->i_d.di_gid))) {
  459. ip->i_d.di_mode &= ~S_ISGID;
  460. }
  461. ip->i_d.di_size = 0;
  462. ip->i_d.di_nextents = 0;
  463. ASSERT(ip->i_d.di_nblocks == 0);
  464. nanotime(&tv);
  465. ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
  466. ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
  467. ip->i_d.di_atime = ip->i_d.di_mtime;
  468. ip->i_d.di_ctime = ip->i_d.di_mtime;
  469. /*
  470. * di_gen will have been taken care of in xfs_iread.
  471. */
  472. ip->i_d.di_extsize = 0;
  473. ip->i_d.di_dmevmask = 0;
  474. ip->i_d.di_dmstate = 0;
  475. ip->i_d.di_flags = 0;
  476. if (ip->i_d.di_version == 3) {
  477. ASSERT(ip->i_d.di_ino == ino);
  478. ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_uuid));
  479. ip->i_d.di_crc = 0;
  480. ip->i_d.di_changecount = 1;
  481. ip->i_d.di_lsn = 0;
  482. ip->i_d.di_flags2 = 0;
  483. memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2));
  484. ip->i_d.di_crtime = ip->i_d.di_mtime;
  485. }
  486. flags = XFS_ILOG_CORE;
  487. switch (mode & S_IFMT) {
  488. case S_IFIFO:
  489. case S_IFCHR:
  490. case S_IFBLK:
  491. case S_IFSOCK:
  492. ip->i_d.di_format = XFS_DINODE_FMT_DEV;
  493. ip->i_df.if_u2.if_rdev = rdev;
  494. ip->i_df.if_flags = 0;
  495. flags |= XFS_ILOG_DEV;
  496. break;
  497. case S_IFREG:
  498. /*
  499. * we can't set up filestreams until after the VFS inode
  500. * is set up properly.
  501. */
  502. if (pip && xfs_inode_is_filestream(pip))
  503. filestreams = 1;
  504. /* fall through */
  505. case S_IFDIR:
  506. if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
  507. uint di_flags = 0;
  508. if (S_ISDIR(mode)) {
  509. if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
  510. di_flags |= XFS_DIFLAG_RTINHERIT;
  511. if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
  512. di_flags |= XFS_DIFLAG_EXTSZINHERIT;
  513. ip->i_d.di_extsize = pip->i_d.di_extsize;
  514. }
  515. } else if (S_ISREG(mode)) {
  516. if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
  517. di_flags |= XFS_DIFLAG_REALTIME;
  518. if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
  519. di_flags |= XFS_DIFLAG_EXTSIZE;
  520. ip->i_d.di_extsize = pip->i_d.di_extsize;
  521. }
  522. }
  523. if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
  524. xfs_inherit_noatime)
  525. di_flags |= XFS_DIFLAG_NOATIME;
  526. if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
  527. xfs_inherit_nodump)
  528. di_flags |= XFS_DIFLAG_NODUMP;
  529. if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
  530. xfs_inherit_sync)
  531. di_flags |= XFS_DIFLAG_SYNC;
  532. if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
  533. xfs_inherit_nosymlinks)
  534. di_flags |= XFS_DIFLAG_NOSYMLINKS;
  535. if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
  536. di_flags |= XFS_DIFLAG_PROJINHERIT;
  537. if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
  538. xfs_inherit_nodefrag)
  539. di_flags |= XFS_DIFLAG_NODEFRAG;
  540. if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
  541. di_flags |= XFS_DIFLAG_FILESTREAM;
  542. ip->i_d.di_flags |= di_flags;
  543. }
  544. /* FALLTHROUGH */
  545. case S_IFLNK:
  546. ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
  547. ip->i_df.if_flags = XFS_IFEXTENTS;
  548. ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
  549. ip->i_df.if_u1.if_extents = NULL;
  550. break;
  551. default:
  552. ASSERT(0);
  553. }
  554. /*
  555. * Attribute fork settings for new inode.
  556. */
  557. ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
  558. ip->i_d.di_anextents = 0;
  559. /*
  560. * Log the new values stuffed into the inode.
  561. */
  562. xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
  563. xfs_trans_log_inode(tp, ip, flags);
  564. /* now that we have an i_mode we can setup inode ops and unlock */
  565. xfs_setup_inode(ip);
  566. /* now we have set up the vfs inode we can associate the filestream */
  567. if (filestreams) {
  568. error = xfs_filestream_associate(pip, ip);
  569. if (error < 0)
  570. return -error;
  571. if (!error)
  572. xfs_iflags_set(ip, XFS_IFILESTREAM);
  573. }
  574. *ipp = ip;
  575. return 0;
  576. }
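/*
 * Illustrative caller sketch, not part of the original file: the two-phase
 * protocol described above, condensed from the in-tree caller
 * (xfs_dir_ialloc).  Error handling, quota and unlock details are omitted,
 * and the create log reservation macros are assumed to be the ones that
 * caller uses.  xfs_example_ialloc() is hypothetical.
 */
STATIC int
xfs_example_ialloc(
	xfs_trans_t	**tpp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_inode_t	**ipp)
{
	xfs_trans_t	*tp = *tpp;
	xfs_trans_t	*ntp;
	xfs_buf_t	*ialloc_context = NULL;
	int		error;

	error = xfs_ialloc(tp, pip, mode, 1, 0, 0, 1,
			   &ialloc_context, ipp);
	if (error || *ipp) {
		*tpp = tp;
		return error;
	}

	/*
	 * No inode was handed back: hold the freelist buffer across a
	 * transaction roll, then retry the allocation in the new transaction.
	 */
	xfs_trans_bhold(tp, ialloc_context);
	ntp = xfs_trans_dup(tp);
	error = xfs_trans_commit(tp, 0);
	tp = ntp;
	if (!error)
		error = xfs_trans_reserve(tp, 0,
					  XFS_CREATE_LOG_RES(tp->t_mountp), 0,
					  XFS_TRANS_PERM_LOG_RES,
					  XFS_CREATE_LOG_COUNT);
	xfs_trans_bjoin(tp, ialloc_context);
	xfs_trans_bhold_release(tp, ialloc_context);
	if (!error)
		error = xfs_ialloc(tp, pip, mode, 1, 0, 0, 1,
				   &ialloc_context, ipp);
	*tpp = tp;
	return error;
}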
  577. /*
  578. * Free up the underlying blocks past new_size. The new size must be smaller
  579. * than the current size. This routine can be used both for the attribute and
  580. * data fork, and does not modify the inode size, which is left to the caller.
  581. *
  582. * The transaction passed to this routine must have made a permanent log
  583. * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
  584. * given transaction and start new ones, so make sure everything involved in
  585. * the transaction is tidy before calling here. Some transaction will be
  586. * returned to the caller to be committed. The incoming transaction must
  587. * already include the inode, and both inode locks must be held exclusively.
  588. * The inode must also be "held" within the transaction. On return the inode
  589. * will be "held" within the returned transaction. This routine does NOT
  590. * require any disk space to be reserved for it within the transaction.
  591. *
  592. * If we get an error, we must return with the inode locked and linked into the
  593. * current transaction. This keeps things simple for the higher level code,
  594. * because it always knows that the inode is locked and held in the transaction
  595. * that returns to it whether errors occur or not. We don't mark the inode
  596. * dirty on error so that transactions can be easily aborted if possible.
  597. */
  598. int
  599. xfs_itruncate_extents(
  600. struct xfs_trans **tpp,
  601. struct xfs_inode *ip,
  602. int whichfork,
  603. xfs_fsize_t new_size)
  604. {
  605. struct xfs_mount *mp = ip->i_mount;
  606. struct xfs_trans *tp = *tpp;
  607. struct xfs_trans *ntp;
  608. xfs_bmap_free_t free_list;
  609. xfs_fsblock_t first_block;
  610. xfs_fileoff_t first_unmap_block;
  611. xfs_fileoff_t last_block;
  612. xfs_filblks_t unmap_len;
  613. int committed;
  614. int error = 0;
  615. int done = 0;
  616. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  617. ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
  618. xfs_isilocked(ip, XFS_IOLOCK_EXCL));
  619. ASSERT(new_size <= XFS_ISIZE(ip));
  620. ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
  621. ASSERT(ip->i_itemp != NULL);
  622. ASSERT(ip->i_itemp->ili_lock_flags == 0);
  623. ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
  624. trace_xfs_itruncate_extents_start(ip, new_size);
  625. /*
  626. * Since it is possible for space to become allocated beyond
  627. * the end of the file (in a crash where the space is allocated
  628. * but the inode size is not yet updated), simply remove any
  629. * blocks which show up between the new EOF and the maximum
  630. * possible file size. If the first block to be removed is
  631. * beyond the maximum file size (ie it is the same as last_block),
  632. * then there is nothing to do.
  633. */
  634. first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
  635. last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
  636. if (first_unmap_block == last_block)
  637. return 0;
  638. ASSERT(first_unmap_block < last_block);
  639. unmap_len = last_block - first_unmap_block + 1;
  640. while (!done) {
  641. xfs_bmap_init(&free_list, &first_block);
  642. error = xfs_bunmapi(tp, ip,
  643. first_unmap_block, unmap_len,
  644. xfs_bmapi_aflag(whichfork),
  645. XFS_ITRUNC_MAX_EXTENTS,
  646. &first_block, &free_list,
  647. &done);
  648. if (error)
  649. goto out_bmap_cancel;
  650. /*
  651. * Duplicate the transaction that has the permanent
  652. * reservation and commit the old transaction.
  653. */
  654. error = xfs_bmap_finish(&tp, &free_list, &committed);
  655. if (committed)
  656. xfs_trans_ijoin(tp, ip, 0);
  657. if (error)
  658. goto out_bmap_cancel;
  659. if (committed) {
  660. /*
  661. * Mark the inode dirty so it will be logged and
  662. * moved forward in the log as part of every commit.
  663. */
  664. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  665. }
  666. ntp = xfs_trans_dup(tp);
  667. error = xfs_trans_commit(tp, 0);
  668. tp = ntp;
  669. xfs_trans_ijoin(tp, ip, 0);
  670. if (error)
  671. goto out;
  672. /*
  673. * Transaction commit worked ok so we can drop the extra ticket
  674. * reference that we gained in xfs_trans_dup()
  675. */
  676. xfs_log_ticket_put(tp->t_ticket);
  677. error = xfs_trans_reserve(tp, 0,
  678. XFS_ITRUNCATE_LOG_RES(mp), 0,
  679. XFS_TRANS_PERM_LOG_RES,
  680. XFS_ITRUNCATE_LOG_COUNT);
  681. if (error)
  682. goto out;
  683. }
  684. /*
  685. * Always re-log the inode so that our permanent transaction can keep
  686. * on rolling it forward in the log.
  687. */
  688. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  689. trace_xfs_itruncate_extents_end(ip, new_size);
  690. out:
  691. *tpp = tp;
  692. return error;
  693. out_bmap_cancel:
  694. /*
  695. * If the bunmapi call encounters an error, return to the caller where
  696. * the transaction can be properly aborted. We just need to make sure
  697. * we're not holding any resources that we were not when we came in.
  698. */
  699. xfs_bmap_cancel(&free_list);
  700. goto out;
  701. }
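/*
 * Illustrative caller sketch, not part of the original file: freeing the
 * data fork blocks beyond a new (smaller) size and committing whatever
 * transaction comes back, per the contract above.  The permanent log
 * reservation and XFS_IOLOCK_EXCL are assumed to have been set up by the
 * caller; xfs_example_truncate() is hypothetical.
 */
STATIC int
xfs_example_truncate(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fsize_t		new_size)
{
	int			error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
		goto out_unlock;
	}
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}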
  702. /*
  703. * This is called when the inode's link count goes to 0.
  704. * We place the on-disk inode on a list in the AGI. It
  705. * will be pulled from this list when the inode is freed.
  706. */
  707. int
  708. xfs_iunlink(
  709. xfs_trans_t *tp,
  710. xfs_inode_t *ip)
  711. {
  712. xfs_mount_t *mp;
  713. xfs_agi_t *agi;
  714. xfs_dinode_t *dip;
  715. xfs_buf_t *agibp;
  716. xfs_buf_t *ibp;
  717. xfs_agino_t agino;
  718. short bucket_index;
  719. int offset;
  720. int error;
  721. ASSERT(ip->i_d.di_nlink == 0);
  722. ASSERT(ip->i_d.di_mode != 0);
  723. mp = tp->t_mountp;
  724. /*
  725. * Get the agi buffer first. It ensures lock ordering
  726. * on the list.
  727. */
  728. error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
  729. if (error)
  730. return error;
  731. agi = XFS_BUF_TO_AGI(agibp);
  732. /*
  733. * Get the index into the agi hash table for the
  734. * list this inode will go on.
  735. */
  736. agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
  737. ASSERT(agino != 0);
  738. bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
  739. ASSERT(agi->agi_unlinked[bucket_index]);
  740. ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
  741. if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
  742. /*
  743. * There is already another inode in the bucket we need
  744. * to add ourselves to. Add us at the front of the list.
  745. * Here we put the head pointer into our next pointer,
  746. * and then we fall through to point the head at us.
  747. */
  748. error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
  749. 0, 0);
  750. if (error)
  751. return error;
  752. ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
  753. dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
  754. offset = ip->i_imap.im_boffset +
  755. offsetof(xfs_dinode_t, di_next_unlinked);
  756. /* need to recalc the inode CRC if appropriate */
  757. xfs_dinode_calc_crc(mp, dip);
  758. xfs_trans_inode_buf(tp, ibp);
  759. xfs_trans_log_buf(tp, ibp, offset,
  760. (offset + sizeof(xfs_agino_t) - 1));
  761. xfs_inobp_check(mp, ibp);
  762. }
  763. /*
  764. * Point the bucket head pointer at the inode being inserted.
  765. */
  766. ASSERT(agino != 0);
  767. agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
  768. offset = offsetof(xfs_agi_t, agi_unlinked) +
  769. (sizeof(xfs_agino_t) * bucket_index);
  770. xfs_trans_log_buf(tp, agibp, offset,
  771. (offset + sizeof(xfs_agino_t) - 1));
  772. return 0;
  773. }
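/*
 * Illustrative caller sketch, not part of the original file: when a
 * transaction drops the last link, the inode is parked on the AGI unlinked
 * list so log recovery can finish freeing it after a crash.  Loosely
 * modeled on the in-tree xfs_droplink(); timestamp and VFS nlink
 * bookkeeping are omitted.  xfs_example_droplink() is hypothetical.
 */
STATIC int
xfs_example_droplink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	ASSERT(ip->i_d.di_nlink > 0);
	ip->i_d.di_nlink--;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (ip->i_d.di_nlink == 0)
		return xfs_iunlink(tp, ip);
	return 0;
}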
  774. /*
  775. * Pull the on-disk inode from the AGI unlinked list.
  776. */
  777. STATIC int
  778. xfs_iunlink_remove(
  779. xfs_trans_t *tp,
  780. xfs_inode_t *ip)
  781. {
  782. xfs_ino_t next_ino;
  783. xfs_mount_t *mp;
  784. xfs_agi_t *agi;
  785. xfs_dinode_t *dip;
  786. xfs_buf_t *agibp;
  787. xfs_buf_t *ibp;
  788. xfs_agnumber_t agno;
  789. xfs_agino_t agino;
  790. xfs_agino_t next_agino;
  791. xfs_buf_t *last_ibp;
  792. xfs_dinode_t *last_dip = NULL;
  793. short bucket_index;
  794. int offset, last_offset = 0;
  795. int error;
  796. mp = tp->t_mountp;
  797. agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
  798. /*
  799. * Get the agi buffer first. It ensures lock ordering
  800. * on the list.
  801. */
  802. error = xfs_read_agi(mp, tp, agno, &agibp);
  803. if (error)
  804. return error;
  805. agi = XFS_BUF_TO_AGI(agibp);
  806. /*
  807. * Get the index into the agi hash table for the
  808. * list this inode will go on.
  809. */
  810. agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
  811. ASSERT(agino != 0);
  812. bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
  813. ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
  814. ASSERT(agi->agi_unlinked[bucket_index]);
  815. if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
  816. /*
  817. * We're at the head of the list. Get the inode's on-disk
  818. * buffer to see if there is anyone after us on the list.
  819. * Only modify our next pointer if it is not already NULLAGINO.
  820. * This saves us the overhead of dealing with the buffer when
  821. * there is no need to change it.
  822. */
  823. error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
  824. 0, 0);
  825. if (error) {
  826. xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
  827. __func__, error);
  828. return error;
  829. }
  830. next_agino = be32_to_cpu(dip->di_next_unlinked);
  831. ASSERT(next_agino != 0);
  832. if (next_agino != NULLAGINO) {
  833. dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
  834. offset = ip->i_imap.im_boffset +
  835. offsetof(xfs_dinode_t, di_next_unlinked);
  836. /* need to recalc the inode CRC if appropriate */
  837. xfs_dinode_calc_crc(mp, dip);
  838. xfs_trans_inode_buf(tp, ibp);
  839. xfs_trans_log_buf(tp, ibp, offset,
  840. (offset + sizeof(xfs_agino_t) - 1));
  841. xfs_inobp_check(mp, ibp);
  842. } else {
  843. xfs_trans_brelse(tp, ibp);
  844. }
  845. /*
  846. * Point the bucket head pointer at the next inode.
  847. */
  848. ASSERT(next_agino != 0);
  849. ASSERT(next_agino != agino);
  850. agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
  851. offset = offsetof(xfs_agi_t, agi_unlinked) +
  852. (sizeof(xfs_agino_t) * bucket_index);
  853. xfs_trans_log_buf(tp, agibp, offset,
  854. (offset + sizeof(xfs_agino_t) - 1));
  855. } else {
  856. /*
  857. * We need to search the list for the inode being freed.
  858. */
  859. next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
  860. last_ibp = NULL;
  861. while (next_agino != agino) {
  862. struct xfs_imap imap;
  863. if (last_ibp)
  864. xfs_trans_brelse(tp, last_ibp);
  865. imap.im_blkno = 0;
  866. next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
  867. error = xfs_imap(mp, tp, next_ino, &imap, 0);
  868. if (error) {
  869. xfs_warn(mp,
  870. "%s: xfs_imap returned error %d.",
  871. __func__, error);
  872. return error;
  873. }
  874. error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
  875. &last_ibp, 0, 0);
  876. if (error) {
  877. xfs_warn(mp,
  878. "%s: xfs_imap_to_bp returned error %d.",
  879. __func__, error);
  880. return error;
  881. }
  882. last_offset = imap.im_boffset;
  883. next_agino = be32_to_cpu(last_dip->di_next_unlinked);
  884. ASSERT(next_agino != NULLAGINO);
  885. ASSERT(next_agino != 0);
  886. }
  887. /*
  888. * Now last_ibp points to the buffer previous to us on the
  889. * unlinked list. Pull us from the list.
  890. */
  891. error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
  892. 0, 0);
  893. if (error) {
  894. xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
  895. __func__, error);
  896. return error;
  897. }
  898. next_agino = be32_to_cpu(dip->di_next_unlinked);
  899. ASSERT(next_agino != 0);
  900. ASSERT(next_agino != agino);
  901. if (next_agino != NULLAGINO) {
  902. dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
  903. offset = ip->i_imap.im_boffset +
  904. offsetof(xfs_dinode_t, di_next_unlinked);
  905. /* need to recalc the inode CRC if appropriate */
  906. xfs_dinode_calc_crc(mp, dip);
  907. xfs_trans_inode_buf(tp, ibp);
  908. xfs_trans_log_buf(tp, ibp, offset,
  909. (offset + sizeof(xfs_agino_t) - 1));
  910. xfs_inobp_check(mp, ibp);
  911. } else {
  912. xfs_trans_brelse(tp, ibp);
  913. }
  914. /*
  915. * Point the previous inode on the list to the next inode.
  916. */
  917. last_dip->di_next_unlinked = cpu_to_be32(next_agino);
  918. ASSERT(next_agino != 0);
  919. offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
  920. /* need to recalc the inode CRC if appropriate */
  921. xfs_dinode_calc_crc(mp, last_dip);
  922. xfs_trans_inode_buf(tp, last_ibp);
  923. xfs_trans_log_buf(tp, last_ibp, offset,
  924. (offset + sizeof(xfs_agino_t) - 1));
  925. xfs_inobp_check(mp, last_ibp);
  926. }
  927. return 0;
  928. }
  929. /*
  930. * A big issue when freeing the inode cluster is that we _cannot_ skip any
  931. * inodes that are in memory - they all must be marked stale and attached to
  932. * the cluster buffer.
  933. */
  934. STATIC int
  935. xfs_ifree_cluster(
  936. xfs_inode_t *free_ip,
  937. xfs_trans_t *tp,
  938. xfs_ino_t inum)
  939. {
  940. xfs_mount_t *mp = free_ip->i_mount;
  941. int blks_per_cluster;
  942. int nbufs;
  943. int ninodes;
  944. int i, j;
  945. xfs_daddr_t blkno;
  946. xfs_buf_t *bp;
  947. xfs_inode_t *ip;
  948. xfs_inode_log_item_t *iip;
  949. xfs_log_item_t *lip;
  950. struct xfs_perag *pag;
  951. pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
  952. if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
  953. blks_per_cluster = 1;
  954. ninodes = mp->m_sb.sb_inopblock;
  955. nbufs = XFS_IALLOC_BLOCKS(mp);
  956. } else {
  957. blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
  958. mp->m_sb.sb_blocksize;
  959. ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
  960. nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
  961. }
  962. for (j = 0; j < nbufs; j++, inum += ninodes) {
  963. blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
  964. XFS_INO_TO_AGBNO(mp, inum));
  965. /*
  966. * We obtain and lock the backing buffer first in the process
  967. * here, as we have to ensure that any dirty inode that we
  968. * can't get the flush lock on is attached to the buffer.
  969. * If we scan the in-memory inodes first, then buffer IO can
  970. * complete before we get a lock on it, and hence we may fail
  971. * to mark all the active inodes on the buffer stale.
  972. */
  973. bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
  974. mp->m_bsize * blks_per_cluster,
  975. XBF_UNMAPPED);
  976. if (!bp)
  977. return ENOMEM;
  978. /*
  979. * This buffer may not have been correctly initialised as we
  980. * didn't read it from disk. That's not important because we are
  981. * only using it to mark the buffer as stale in the log, and to
  982. * attach stale cached inodes on it. That means it will never be
  983. * dispatched for IO. If it is, we want to know about it, and we
  984. * want it to fail. We can achieve this by adding a write
  985. * verifier to the buffer.
  986. */
  987. bp->b_ops = &xfs_inode_buf_ops;
  988. /*
  989. * Walk the inodes already attached to the buffer and mark them
  990. * stale. These will all have the flush locks held, so an
  991. * in-memory inode walk can't lock them. By marking them all
  992. * stale first, we will not attempt to lock them in the loop
  993. * below as the XFS_ISTALE flag will be set.
  994. */
  995. lip = bp->b_fspriv;
  996. while (lip) {
  997. if (lip->li_type == XFS_LI_INODE) {
  998. iip = (xfs_inode_log_item_t *)lip;
  999. ASSERT(iip->ili_logged == 1);
  1000. lip->li_cb = xfs_istale_done;
  1001. xfs_trans_ail_copy_lsn(mp->m_ail,
  1002. &iip->ili_flush_lsn,
  1003. &iip->ili_item.li_lsn);
  1004. xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
  1005. }
  1006. lip = lip->li_bio_list;
  1007. }
  1008. /*
  1009. * For each inode in memory attempt to add it to the inode
  1010. * buffer and set it up for being staled on buffer IO
  1011. * completion. This is safe as we've locked out tail pushing
  1012. * and flushing by locking the buffer.
  1013. *
  1014. * We have already marked every inode that was part of a
  1015. * transaction stale above, which means there is no point in
  1016. * even trying to lock them.
  1017. */
  1018. for (i = 0; i < ninodes; i++) {
  1019. retry:
  1020. rcu_read_lock();
  1021. ip = radix_tree_lookup(&pag->pag_ici_root,
  1022. XFS_INO_TO_AGINO(mp, (inum + i)));
  1023. /* Inode not in memory, nothing to do */
  1024. if (!ip) {
  1025. rcu_read_unlock();
  1026. continue;
  1027. }
  1028. /*
  1029. * because this is an RCU protected lookup, we could
  1030. * find a recently freed or even reallocated inode
  1031. * during the lookup. We need to check under the
  1032. * i_flags_lock for a valid inode here. Skip it if it
  1033. * is not valid, the wrong inode or stale.
  1034. */
  1035. spin_lock(&ip->i_flags_lock);
  1036. if (ip->i_ino != inum + i ||
  1037. __xfs_iflags_test(ip, XFS_ISTALE)) {
  1038. spin_unlock(&ip->i_flags_lock);
  1039. rcu_read_unlock();
  1040. continue;
  1041. }
  1042. spin_unlock(&ip->i_flags_lock);
  1043. /*
  1044. * Don't try to lock/unlock the current inode, but we
  1045. * _cannot_ skip the other inodes that we did not find
  1046. * in the list attached to the buffer and are not
  1047. * already marked stale. If we can't lock it, back off
  1048. * and retry.
  1049. */
  1050. if (ip != free_ip &&
  1051. !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
  1052. rcu_read_unlock();
  1053. delay(1);
  1054. goto retry;
  1055. }
  1056. rcu_read_unlock();
  1057. xfs_iflock(ip);
  1058. xfs_iflags_set(ip, XFS_ISTALE);
  1059. /*
  1060. * we don't need to attach clean inodes or those only
  1061. * with unlogged changes (which we throw away, anyway).
  1062. */
  1063. iip = ip->i_itemp;
  1064. if (!iip || xfs_inode_clean(ip)) {
  1065. ASSERT(ip != free_ip);
  1066. xfs_ifunlock(ip);
  1067. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1068. continue;
  1069. }
  1070. iip->ili_last_fields = iip->ili_fields;
  1071. iip->ili_fields = 0;
  1072. iip->ili_logged = 1;
  1073. xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
  1074. &iip->ili_item.li_lsn);
  1075. xfs_buf_attach_iodone(bp, xfs_istale_done,
  1076. &iip->ili_item);
  1077. if (ip != free_ip)
  1078. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1079. }
  1080. xfs_trans_stale_inode_buf(tp, bp);
  1081. xfs_trans_binval(tp, bp);
  1082. }
  1083. xfs_perag_put(pag);
  1084. return 0;
  1085. }
  1086. /*
  1087. * This is called to return an inode to the inode free list.
  1088. * The inode should already be truncated to 0 length and have
  1089. * no pages associated with it. This routine also assumes that
  1090. * the inode is already a part of the transaction.
  1091. *
  1092. * The on-disk copy of the inode will have been added to the list
  1093. * of unlinked inodes in the AGI. We need to remove the inode from
  1094. * that list atomically with respect to freeing it here.
  1095. */
  1096. int
  1097. xfs_ifree(
  1098. xfs_trans_t *tp,
  1099. xfs_inode_t *ip,
  1100. xfs_bmap_free_t *flist)
  1101. {
  1102. int error;
  1103. int delete;
  1104. xfs_ino_t first_ino;
  1105. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  1106. ASSERT(ip->i_d.di_nlink == 0);
  1107. ASSERT(ip->i_d.di_nextents == 0);
  1108. ASSERT(ip->i_d.di_anextents == 0);
  1109. ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
  1110. ASSERT(ip->i_d.di_nblocks == 0);
  1111. /*
  1112. * Pull the on-disk inode from the AGI unlinked list.
  1113. */
  1114. error = xfs_iunlink_remove(tp, ip);
  1115. if (error)
  1116. return error;
  1117. error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
  1118. if (error)
  1119. return error;
  1120. ip->i_d.di_mode = 0; /* mark incore inode as free */
  1121. ip->i_d.di_flags = 0;
  1122. ip->i_d.di_dmevmask = 0;
  1123. ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
  1124. ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
  1125. ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
  1126. /*
  1127. * Bump the generation count so no one will be confused
  1128. * by reincarnations of this inode.
  1129. */
  1130. ip->i_d.di_gen++;
  1131. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  1132. if (delete)
  1133. error = xfs_ifree_cluster(ip, tp, first_ino);
  1134. return error;
  1135. }
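/*
 * Illustrative caller sketch, not part of the original file: the final
 * free of an inode that has already been truncated to zero length and
 * joined to the transaction.  The caller supplies the bmap free list and
 * finishes it afterwards; quota adjustments and the transaction commit are
 * omitted.  xfs_example_ifree() is hypothetical.
 */
STATIC int
xfs_example_ifree(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_bmap_free_t	free_list;
	xfs_fsblock_t	first_block;
	int		committed;
	int		error;

	xfs_bmap_init(&free_list, &first_block);
	error = xfs_ifree(tp, ip, &free_list);
	if (error)
		return error;
	return xfs_bmap_finish(&tp, &free_list, &committed);
}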
  1136. /*
  1137. * This is called to unpin an inode. The caller must have the inode locked
  1138. * in at least shared mode so that the buffer cannot be subsequently pinned
  1139. * once someone is waiting for it to be unpinned.
  1140. */
  1141. static void
  1142. xfs_iunpin(
  1143. struct xfs_inode *ip)
  1144. {
  1145. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
  1146. trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
  1147. /* Give the log a push to start the unpinning I/O */
  1148. xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
  1149. }
  1150. static void
  1151. __xfs_iunpin_wait(
  1152. struct xfs_inode *ip)
  1153. {
  1154. wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
  1155. DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
  1156. xfs_iunpin(ip);
  1157. do {
  1158. prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
  1159. if (xfs_ipincount(ip))
  1160. io_schedule();
  1161. } while (xfs_ipincount(ip));
  1162. finish_wait(wq, &wait.wait);
  1163. }
  1164. void
  1165. xfs_iunpin_wait(
  1166. struct xfs_inode *ip)
  1167. {
  1168. if (xfs_ipincount(ip))
  1169. __xfs_iunpin_wait(ip);
  1170. }
  1171. STATIC int
  1172. xfs_iflush_cluster(
  1173. xfs_inode_t *ip,
  1174. xfs_buf_t *bp)
  1175. {
  1176. xfs_mount_t *mp = ip->i_mount;
  1177. struct xfs_perag *pag;
  1178. unsigned long first_index, mask;
  1179. unsigned long inodes_per_cluster;
  1180. int ilist_size;
  1181. xfs_inode_t **ilist;
  1182. xfs_inode_t *iq;
  1183. int nr_found;
  1184. int clcount = 0;
  1185. int bufwasdelwri;
  1186. int i;
  1187. pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
  1188. inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
  1189. ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
  1190. ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
  1191. if (!ilist)
  1192. goto out_put;
  1193. mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
  1194. first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
  1195. rcu_read_lock();
  1196. /* really need a gang lookup range call here */
  1197. nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
  1198. first_index, inodes_per_cluster);
  1199. if (nr_found == 0)
  1200. goto out_free;
  1201. for (i = 0; i < nr_found; i++) {
  1202. iq = ilist[i];
  1203. if (iq == ip)
  1204. continue;
  1205. /*
  1206. * because this is an RCU protected lookup, we could find a
  1207. * recently freed or even reallocated inode during the lookup.
  1208. * We need to check under the i_flags_lock for a valid inode
  1209. * here. Skip it if it is not valid or the wrong inode.
  1210. */
  1211. spin_lock(&iq->i_flags_lock);
  1212. if (!iq->i_ino ||
  1213. (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
  1214. spin_unlock(&iq->i_flags_lock);
  1215. continue;
  1216. }
  1217. spin_unlock(&iq->i_flags_lock);
  1218. /*
  1219. * Do an un-protected check to see if the inode is dirty and
  1220. * is a candidate for flushing. These checks will be repeated
  1221. * later after the appropriate locks are acquired.
  1222. */
  1223. if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
  1224. continue;
  1225. /*
  1226. * Try to get locks. If any are unavailable or it is pinned,
  1227. * then this inode cannot be flushed and is skipped.
  1228. */
  1229. if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
  1230. continue;
  1231. if (!xfs_iflock_nowait(iq)) {
  1232. xfs_iunlock(iq, XFS_ILOCK_SHARED);
  1233. continue;
  1234. }
  1235. if (xfs_ipincount(iq)) {
  1236. xfs_ifunlock(iq);
  1237. xfs_iunlock(iq, XFS_ILOCK_SHARED);
  1238. continue;
  1239. }
  1240. /*
  1241. * arriving here means that this inode can be flushed. First
  1242. * re-check that it's dirty before flushing.
  1243. */
  1244. if (!xfs_inode_clean(iq)) {
  1245. int error;
  1246. error = xfs_iflush_int(iq, bp);
  1247. if (error) {
  1248. xfs_iunlock(iq, XFS_ILOCK_SHARED);
  1249. goto cluster_corrupt_out;
  1250. }
  1251. clcount++;
  1252. } else {
  1253. xfs_ifunlock(iq);
  1254. }
  1255. xfs_iunlock(iq, XFS_ILOCK_SHARED);
  1256. }
  1257. if (clcount) {
  1258. XFS_STATS_INC(xs_icluster_flushcnt);
  1259. XFS_STATS_ADD(xs_icluster_flushinode, clcount);
  1260. }
  1261. out_free:
  1262. rcu_read_unlock();
  1263. kmem_free(ilist);
  1264. out_put:
  1265. xfs_perag_put(pag);
  1266. return 0;
  1267. cluster_corrupt_out:
  1268. /*
  1269. * Corruption detected in the clustering loop. Invalidate the
  1270. * inode buffer and shut down the filesystem.
  1271. */
  1272. rcu_read_unlock();
  1273. /*
  1274. * Clean up the buffer. If it was delwri, just release it --
  1275. * brelse can handle it with no problems. If not, shut down the
  1276. * filesystem before releasing the buffer.
  1277. */
  1278. bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
  1279. if (bufwasdelwri)
  1280. xfs_buf_relse(bp);
  1281. xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
  1282. if (!bufwasdelwri) {
  1283. /*
  1284. * Just like incore_relse: if we have b_iodone functions,
  1285. * mark the buffer as an error and call them. Otherwise
  1286. * mark it as stale and brelse.
  1287. */
  1288. if (bp->b_iodone) {
  1289. XFS_BUF_UNDONE(bp);
  1290. xfs_buf_stale(bp);
  1291. xfs_buf_ioerror(bp, EIO);
  1292. xfs_buf_ioend(bp, 0);
  1293. } else {
  1294. xfs_buf_stale(bp);
  1295. xfs_buf_relse(bp);
  1296. }
  1297. }
  1298. /*
  1299. * Unlocks the flush lock
  1300. */
  1301. xfs_iflush_abort(iq, false);
  1302. kmem_free(ilist);
  1303. xfs_perag_put(pag);
  1304. return XFS_ERROR(EFSCORRUPTED);
  1305. }
  1306. /*
  1307. * Flush dirty inode metadata into the backing buffer.
  1308. *
  1309. * The caller must have the inode lock and the inode flush lock held. The
  1310. * inode lock will still be held upon return to the caller, and the inode
  1311. * flush lock will be released after the inode has reached the disk.
  1312. *
  1313. * The caller must write out the buffer returned in *bpp and release it.
  1314. */
  1315. int
  1316. xfs_iflush(
  1317. struct xfs_inode *ip,
  1318. struct xfs_buf **bpp)
  1319. {
  1320. struct xfs_mount *mp = ip->i_mount;
  1321. struct xfs_buf *bp;
  1322. struct xfs_dinode *dip;
  1323. int error;
  1324. XFS_STATS_INC(xs_iflush_count);
  1325. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
  1326. ASSERT(xfs_isiflocked(ip));
  1327. ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
  1328. ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
  1329. *bpp = NULL;
  1330. xfs_iunpin_wait(ip);
  1331. /*
  1332. * For stale inodes we cannot rely on the backing buffer remaining
  1333. * stale in cache for the remaining life of the stale inode and so
  1334. * xfs_imap_to_bp() below may give us a buffer that no longer contains
  1335. * inodes below. We have to check this after ensuring the inode is
  1336. * unpinned so that it is safe to reclaim the stale inode after the
  1337. * flush call.
  1338. */
  1339. if (xfs_iflags_test(ip, XFS_ISTALE)) {
  1340. xfs_ifunlock(ip);
  1341. return 0;
  1342. }
  1343. /*
  1344. * This may have been unpinned because the filesystem is shutting
  1345. * down forcibly. If that's the case we must not write this inode
  1346. * to disk, because the log record didn't make it to disk.
  1347. *
  1348. * We also have to remove the log item from the AIL in this case,
  1349. * as we wait for an empty AIL as part of the unmount process.
  1350. */
  1351. if (XFS_FORCED_SHUTDOWN(mp)) {
  1352. error = XFS_ERROR(EIO);
  1353. goto abort_out;
  1354. }
  1355. /*
  1356. * Get the buffer containing the on-disk inode.
  1357. */
  1358. error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
  1359. 0);
  1360. if (error || !bp) {
  1361. xfs_ifunlock(ip);
  1362. return error;
  1363. }
  1364. /*
  1365. * First flush out the inode that xfs_iflush was called with.
  1366. */
  1367. error = xfs_iflush_int(ip, bp);
  1368. if (error)
  1369. goto corrupt_out;
  1370. /*
  1371. * If the buffer is pinned then push on the log now so we won't
  1372. * get stuck waiting in the write for too long.
  1373. */
  1374. if (xfs_buf_ispinned(bp))
  1375. xfs_log_force(mp, 0);
  1376. /*
  1377. * inode clustering:
  1378. * see if other inodes can be gathered into this write
  1379. */
  1380. error = xfs_iflush_cluster(ip, bp);
  1381. if (error)
  1382. goto cluster_corrupt_out;
  1383. *bpp = bp;
  1384. return 0;
  1385. corrupt_out:
  1386. xfs_buf_relse(bp);
  1387. xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
  1388. cluster_corrupt_out:
  1389. error = XFS_ERROR(EFSCORRUPTED);
  1390. abort_out:
  1391. /*
  1392. * Unlocks the flush lock
  1393. */
  1394. xfs_iflush_abort(ip, false);
  1395. return error;
  1396. }
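/*
 * Illustrative caller sketch, not part of the original file: per the
 * contract above, the caller owns the buffer returned in *bpp and must
 * queue it for write-out and release it.  The in-tree caller (the inode
 * log item push path) adds it to a delwri list; "buffer_list" and
 * xfs_example_flush() are assumptions.
 */
STATIC int
xfs_example_flush(
	struct xfs_inode	*ip,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp = NULL;
	int			error;

	error = xfs_iflush(ip, &bp);
	if (error)
		return error;
	if (bp) {
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
	}
	return 0;
}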

STATIC int
xfs_iflush_int(
        struct xfs_inode        *ip,
        struct xfs_buf          *bp)
{
        struct xfs_inode_log_item *iip = ip->i_itemp;
        struct xfs_dinode       *dip;
        struct xfs_mount        *mp = ip->i_mount;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        ASSERT(xfs_isiflocked(ip));
        ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
               ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
        ASSERT(iip != NULL && iip->ili_fields != 0);

        /* set *dip = inode's place in the buffer */
        dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
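
        /*
         * Editor's note: verify the in-core and on-disk inode before writing
         * it back.  Each XFS_TEST_ERROR() check fails if the stated
         * corruption condition is true; on DEBUG kernels the errtag/random
         * arguments also allow the failure to be injected for testing.
         */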
        if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
                           mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
                xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
                        "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
                        __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
                goto corrupt_out;
        }
        if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
                           mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
                xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
                        "%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
                        __func__, ip->i_ino, ip, ip->i_d.di_magic);
                goto corrupt_out;
        }
        if (S_ISREG(ip->i_d.di_mode)) {
                if (XFS_TEST_ERROR(
                    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
                    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
                    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
                        xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
                                "%s: Bad regular inode %Lu, ptr 0x%p",
                                __func__, ip->i_ino, ip);
                        goto corrupt_out;
                }
        } else if (S_ISDIR(ip->i_d.di_mode)) {
                if (XFS_TEST_ERROR(
                    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
                    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
                    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
                    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
                        xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
                                "%s: Bad directory inode %Lu, ptr 0x%p",
                                __func__, ip->i_ino, ip);
                        goto corrupt_out;
                }
        }
        if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
                           ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
                           XFS_RANDOM_IFLUSH_5)) {
                xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
                        "%s: detected corrupt incore inode %Lu, "
                        "total extents = %d, nblocks = %Ld, ptr 0x%p",
                        __func__, ip->i_ino,
                        ip->i_d.di_nextents + ip->i_d.di_anextents,
                        ip->i_d.di_nblocks, ip);
                goto corrupt_out;
        }
        if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
                           mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
                xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
                        "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
                        __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
                goto corrupt_out;
        }

        /*
         * Inode item log recovery for v1/v2 inodes is dependent on the
         * di_flushiter count for correct sequencing.  We bump the flush
         * iteration count so we can detect flushes which postdate a log
         * record during recovery.  This is redundant as we now log every
         * change and hence this can't happen, but we still need to do it to
         * ensure backwards compatibility with old kernels that predate
         * logging all inode changes.
         */
        if (ip->i_d.di_version < 3)
                ip->i_d.di_flushiter++;

        /*
         * Copy the dirty parts of the inode into the on-disk
         * inode.  We always copy out the core of the inode,
         * because if the inode is dirty at all the core must
         * be.
         */
        xfs_dinode_to_disk(dip, &ip->i_d);

        /* Wrap, we never let the log put out DI_MAX_FLUSH */
        if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
                ip->i_d.di_flushiter = 0;

        /*
         * If this is really an old format inode and the superblock version
         * has not been updated to support only new format inodes, then
         * convert back to the old inode format.  If the superblock version
         * has been updated, then make the conversion permanent.
         */
        ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
        if (ip->i_d.di_version == 1) {
                if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
                        /*
                         * Convert it back.
                         */
                        ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
                        dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
                } else {
                        /*
                         * The superblock version has already been bumped,
                         * so just make the conversion to the new inode
                         * format permanent.
                         */
                        ip->i_d.di_version = 2;
                        dip->di_version = 2;
                        ip->i_d.di_onlink = 0;
                        dip->di_onlink = 0;
                        memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
                        memset(&(dip->di_pad[0]), 0,
                               sizeof(dip->di_pad));
                        ASSERT(xfs_get_projid(ip) == 0);
                }
        }
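
        /*
         * Editor's note: write the in-core data fork (and the attribute
         * fork, if the inode has one) into the on-disk inode in the buffer.
         */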
        xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
        if (XFS_IFORK_Q(ip))
                xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
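
        /* Editor's note: DEBUG-only sanity check of the inodes in this cluster buffer. */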
        xfs_inobp_check(mp, bp);

        /*
         * We've recorded everything logged in the inode, so we'd like to clear
         * the ili_fields bits so we don't log and flush things unnecessarily.
         * However, we can't stop logging all this information until the data
         * we've copied into the disk buffer is written to disk.  If we did we
         * might overwrite the copy of the inode in the log with all the data
         * after re-logging only part of it, and in the face of a crash we
         * wouldn't have all the data we need to recover.
         *
         * What we do is move the bits to the ili_last_fields field.  When
         * logging the inode, these bits are moved back to the ili_fields field.
         * In the xfs_iflush_done() routine we clear ili_last_fields, since we
         * know that the information those bits represent is permanently on
         * disk.  As long as the flush completes before the inode is logged
         * again, then both ili_fields and ili_last_fields will be cleared.
         *
         * We can play with the ili_fields bits here, because the inode lock
         * must be held exclusively in order to set bits there and the flush
         * lock protects the ili_last_fields bits.  Set ili_logged so the flush
         * done routine can tell whether or not to look in the AIL.  Also, store
         * the current LSN of the inode so that we can tell whether the item has
         * moved in the AIL from xfs_iflush_done().  In order to read the lsn we
         * need the AIL lock, because it is a 64 bit value that cannot be read
         * atomically.
         */
        iip->ili_last_fields = iip->ili_fields;
        iip->ili_fields = 0;
        iip->ili_logged = 1;

        xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
                               &iip->ili_item.li_lsn);

        /*
         * Attach the function xfs_iflush_done to the inode's
         * buffer.  This will remove the inode from the AIL
         * and unlock the inode's flush lock when the inode is
         * completely written to disk.
         */
        xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);

        /* update the lsn in the on disk inode if required */
        if (ip->i_d.di_version == 3)
                dip->di_lsn = cpu_to_be64(iip->ili_item.li_lsn);

        /* generate the checksum. */
        xfs_dinode_calc_crc(mp, dip);

        ASSERT(bp->b_fspriv != NULL);
        ASSERT(bp->b_iodone != NULL);
        return 0;

corrupt_out:
        return XFS_ERROR(EFSCORRUPTED);
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks.  The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
        /* prealloc/delalloc exists only on regular files */
        if (!S_ISREG(ip->i_d.di_mode))
                return false;

        /*
         * Zero sized files with no cached pages and delalloc blocks will not
         * have speculative prealloc/delalloc blocks to remove.
         */
        if (VFS_I(ip)->i_size == 0 &&
            VN_CACHED(VFS_I(ip)) == 0 &&
            ip->i_delayed_blks == 0)
                return false;

        /* If we haven't read in the extent list, then don't do it now. */
        if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
                return false;

        /*
         * Do not free real preallocated or append-only files unless the file
         * has delalloc blocks and we are forced to remove them.
         */
        if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
                if (!force || ip->i_delayed_blks == 0)
                        return false;

        return true;
}
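
/*
 * Editor's illustrative sketch (not part of the original file, not compiled):
 * how a caller such as file release might gate an EOF-block trim on
 * xfs_can_free_eofblocks().  The helper name below is hypothetical, and the
 * xfs_free_eofblocks(mp, ip, need_iolock) call and its arguments are an
 * assumption about the rest of the codebase.
 */
#if 0
STATIC int
example_trim_eofblocks(
        struct xfs_inode        *ip)
{
        /* Only bother when speculative prealloc/delalloc may exist. */
        if (!xfs_can_free_eofblocks(ip, false))
                return 0;

        /* Trim blocks beyond EOF, letting the helper take the iolock. */
        return xfs_free_eofblocks(ip->i_mount, ip, true);
}
#endif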