xfs_iomap.c

/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"

#if defined(XFS_RW_TRACE)
void
xfs_iomap_enter_trace(
	int		tag,
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	ssize_t		count)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)count),
		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
		(void *)((unsigned long)current_pid()),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}

void
xfs_iomap_map_trace(
	int		tag,
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	ssize_t		count,
	xfs_iomap_t	*iomapp,
	xfs_bmbt_irec_t	*imapp,
	int		flags)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)count),
		(void *)((unsigned long)flags),
		(void *)((unsigned long)((iomapp->iomap_offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(iomapp->iomap_offset & 0xffffffff)),
		(void *)((unsigned long)(iomapp->iomap_delta)),
		(void *)((unsigned long)(iomapp->iomap_bsize)),
		(void *)((unsigned long)(iomapp->iomap_bn)),
		(void *)(__psint_t)(imapp->br_startoff),
		(void *)((unsigned long)(imapp->br_blockcount)),
		(void *)(__psint_t)(imapp->br_startblock));
}
#else
#define xfs_iomap_enter_trace(tag, io, offset, count)
#define xfs_iomap_map_trace(tag, io, offset, count, iomapp, imapp, flags)
#endif

#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
#define XFS_STRAT_WRITE_IMAPS	2
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP
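/*
 * Translate an array of bmap extent records into the caller's iomap
 * array: fill in the target device, byte offset/length, and on-disk
 * block number, and flag holes, delalloc, unwritten and EOF extents.
 * Returns the number of iomap entries actually filled in.
 */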
STATIC int
xfs_imap_to_bmap(
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	xfs_bmbt_irec_t	*imap,
	xfs_iomap_t	*iomapp,
	int		imaps,			/* Number of imap entries */
	int		iomaps,			/* Number of iomap entries */
	int		flags)
{
	xfs_mount_t	*mp;
	xfs_fsize_t	nisize;
	int		pbm;
	xfs_fsblock_t	start_block;

	mp = io->io_mount;
	nisize = XFS_SIZE(mp, io);
	if (io->io_new_size > nisize)
		nisize = io->io_new_size;

	for (pbm = 0; imaps && pbm < iomaps; imaps--, iomapp++, imap++, pbm++) {
		iomapp->iomap_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
		iomapp->iomap_delta = offset - iomapp->iomap_offset;
		iomapp->iomap_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount);
		iomapp->iomap_flags = flags;

		if (io->io_flags & XFS_IOCORE_RT) {
			iomapp->iomap_flags |= IOMAP_REALTIME;
			iomapp->iomap_target = mp->m_rtdev_targp;
		} else {
			iomapp->iomap_target = mp->m_ddev_targp;
		}
		start_block = imap->br_startblock;
		if (start_block == HOLESTARTBLOCK) {
			iomapp->iomap_bn = IOMAP_DADDR_NULL;
			iomapp->iomap_flags |= IOMAP_HOLE;
		} else if (start_block == DELAYSTARTBLOCK) {
			iomapp->iomap_bn = IOMAP_DADDR_NULL;
			iomapp->iomap_flags |= IOMAP_DELAY;
		} else {
			iomapp->iomap_bn = XFS_FSB_TO_DB_IO(io, start_block);
			if (ISUNWRITTEN(imap))
				iomapp->iomap_flags |= IOMAP_UNWRITTEN;
		}

		if ((iomapp->iomap_offset + iomapp->iomap_bsize) >= nisize) {
			iomapp->iomap_flags |= IOMAP_EOF;
		}

		offset += iomapp->iomap_bsize - iomapp->iomap_delta;
	}
	return pbm;	/* Return the number filled */
}
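/*
 * Top-level mapping entry point: take a file range and the BMAPI_*
 * operation flags, look up (and for writes, allocate) the underlying
 * extents, and return them to the caller as xfs_iomap_t entries.
 */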
int
xfs_iomap(
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	ssize_t		count,
	int		flags,
	xfs_iomap_t	*iomapp,
	int		*niomaps)
{
	xfs_mount_t	*mp = io->io_mount;
	xfs_fileoff_t	offset_fsb, end_fsb;
	int		error = 0;
	int		lockmode = 0;
	xfs_bmbt_irec_t	imap;
	int		nimaps = 1;
	int		bmapi_flags = 0;
	int		iomap_flags = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	switch (flags &
		(BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE |
		 BMAPI_UNWRITTEN | BMAPI_DEVICE)) {
	case BMAPI_READ:
		xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, io, offset, count);
		lockmode = XFS_LCK_MAP_SHARED(mp, io);
		bmapi_flags = XFS_BMAPI_ENTIRE;
		break;
	case BMAPI_WRITE:
		xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, io, offset, count);
		lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR;
		if (flags & BMAPI_IGNSTATE)
			bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
		XFS_ILOCK(mp, io, lockmode);
		break;
	case BMAPI_ALLOCATE:
		xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, io, offset, count);
		lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD;
		bmapi_flags = XFS_BMAPI_ENTIRE;
		/* Attempt non-blocking lock */
		if (flags & BMAPI_TRYLOCK) {
			if (!XFS_ILOCK_NOWAIT(mp, io, lockmode))
				return XFS_ERROR(EAGAIN);
		} else {
			XFS_ILOCK(mp, io, lockmode);
		}
		break;
	case BMAPI_UNWRITTEN:
		goto phase2;
	case BMAPI_DEVICE:
		lockmode = XFS_LCK_MAP_SHARED(mp, io);
		iomapp->iomap_target = io->io_flags & XFS_IOCORE_RT ?
			mp->m_rtdev_targp : mp->m_ddev_targp;
		error = 0;
		*niomaps = 1;
		goto out;
	default:
		BUG();
	}

	ASSERT(offset <= mp->m_maxioffset);
	if ((xfs_fsize_t)offset + count > mp->m_maxioffset)
		count = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = XFS_BMAPI(mp, NULL, io, offset_fsb,
			(xfs_filblks_t)(end_fsb - offset_fsb),
			bmapi_flags, NULL, 0, &imap,
			&nimaps, NULL, NULL);

	if (error)
		goto out;

phase2:
	switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE|BMAPI_UNWRITTEN)) {
	case BMAPI_WRITE:
		/* If we found an extent, return it */
		if (nimaps &&
		    (imap.br_startblock != HOLESTARTBLOCK) &&
		    (imap.br_startblock != DELAYSTARTBLOCK)) {
			xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io,
					offset, count, iomapp, &imap, flags);
			break;
		}

		if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) {
			error = XFS_IOMAP_WRITE_DIRECT(mp, io, offset,
					count, flags, &imap, &nimaps, nimaps);
		} else {
			error = XFS_IOMAP_WRITE_DELAY(mp, io, offset, count,
					flags, &imap, &nimaps);
		}
		if (!error) {
			xfs_iomap_map_trace(XFS_IOMAP_ALLOC_MAP, io,
					offset, count, iomapp, &imap, flags);
		}
		iomap_flags = IOMAP_NEW;
		break;
	case BMAPI_ALLOCATE:
		/* If we found an extent, return it */
		XFS_IUNLOCK(mp, io, lockmode);
		lockmode = 0;

		if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock)) {
			xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io,
					offset, count, iomapp, &imap, flags);
			break;
		}

		error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, offset, count,
						 &imap, &nimaps);
		break;
	case BMAPI_UNWRITTEN:
		lockmode = 0;
		error = XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count);
		nimaps = 0;
		break;
	}

	if (nimaps) {
		*niomaps = xfs_imap_to_bmap(io, offset, &imap,
					    iomapp, nimaps, *niomaps, iomap_flags);
	} else if (niomaps) {
		*niomaps = 0;
	}

out:
	if (lockmode)
		XFS_IUNLOCK(mp, io, lockmode);
	return XFS_ERROR(error);
}
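/*
 * For an allocation extending the file, round *last_fsb up to a stripe
 * unit/width or extent size hint boundary, but only apply the new value
 * if the rounded-up block is still beyond the current allocation EOF.
 */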
STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_iocore_t	*io,
	xfs_fsize_t	isize,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_fileoff_t	new_last_fsb = 0;
	xfs_extlen_t	align;
	int		eof, error;

	if (io->io_flags & XFS_IOCORE_RT)
		;
	/*
	 * If mounted with the "-o swalloc" option, roundup the allocation
	 * request to a stripe width boundary if the file size is >=
	 * stripe width and we are allocating past the allocation eof.
	 */
	else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) &&
		 (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)))
		new_last_fsb = roundup_64(*last_fsb, mp->m_swidth);
	/*
	 * Roundup the allocation request to a stripe unit (m_dalign) boundary
	 * if the file size is >= stripe unit size, and we are allocating past
	 * the allocation eof.
	 */
	else if (mp->m_dalign && (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)))
		new_last_fsb = roundup_64(*last_fsb, mp->m_dalign);

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when file on a real-time subvolume or has di_extsize hint).
	 */
	if (extsize) {
		if (new_last_fsb)
			align = roundup_64(new_last_fsb, extsize);
		else
			align = extsize;
		new_last_fsb = roundup_64(*last_fsb, align);
	}

	if (new_last_fsb) {
		error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}
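/*
 * Called when a delayed allocation runs out of space: flush
 * progressively more aggressively on each retry (the inode's delalloc
 * data, then synchronous allocation, then the whole device).  Returns
 * non-zero once every stage has been tried and the caller should give
 * up with ENOSPC.
 */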
STATIC int
xfs_flush_space(
	xfs_inode_t	*ip,
	int		*fsynced,
	int		*ioflags)
{
	switch (*fsynced) {
	case 0:
		if (ip->i_delayed_blks) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			xfs_flush_inode(ip);
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			*fsynced = 1;
		} else {
			*ioflags |= BMAPI_SYNC;
			*fsynced = 2;
		}
		return 0;
	case 1:
		*fsynced = 2;
		*ioflags |= BMAPI_SYNC;
		return 0;
	case 2:
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_flush_device(ip);
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		*fsynced = 3;
		return 0;
	}
	return 1;
}
STATIC int
xfs_cmn_err_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x\n",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return EFSCORRUPTED;
}
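/*
 * Allocate real blocks for a direct I/O write: reserve quota and log
 * space, run xfs_bmapi() inside a transaction, and hand the resulting
 * extent back to the caller.
 */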
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	int		flags,
	xfs_bmbt_irec_t *ret_imap,
	int		*nmaps,
	int		found)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_iocore_t	*io = &ip->i_iocore;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	xfs_fsize_t	isize;
	int		nimaps;
	int		bmapi_flag;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	xfs_bmap_free_t free_list;
	uint		qblocks, resblks, resrtextents;
	int		committed;
	int		error;

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = XFS_QM_DQATTACH(ip->i_mount, ip, XFS_QMOPT_ILOCKED);
	if (error)
		return XFS_ERROR(error);

	rt = XFS_IS_REALTIME_INODE(ip);
	if (unlikely(rt)) {
		if (!(extsz = ip->i_d.di_extsize))
			extsz = mp->m_sb.sb_rextsize;
	} else {
		extsz = ip->i_d.di_extsize;
	}

	isize = ip->i_d.di_size;
	if (io->io_new_size > isize)
		isize = io->io_new_size;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > isize) {
		error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz,
							&last_fsb);
		if (error)
			goto error_out;
	} else {
		if (found && (ret_imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					ret_imap->br_blockcount +
					ret_imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Allocate and setup the transaction
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, resblks,
			XFS_WRITE_LOG_RES(mp), resrtextents,
			XFS_TRANS_PERM_LOG_RES,
			XFS_WRITE_LOG_COUNT);

	/*
	 * Check for running out of space, note: need lock to return
	 */
	if (error)
		xfs_trans_cancel(tp, 0);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (error)
		goto error_out;

	error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
					      qblocks, 0, quota_flag);
	if (error)
		goto error1;

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);

	bmapi_flag = XFS_BMAPI_WRITE;
	if ((flags & BMAPI_DIRECT) && (offset < ip->i_d.di_size || extsz))
		bmapi_flag |= XFS_BMAPI_PREALLOC;

	/*
	 * Issue the xfs_bmapi() call to allocate the blocks
	 */
	XFS_BMAP_INIT(&free_list, &firstfsb);
	nimaps = 1;
	error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb, bmapi_flag,
		&firstfsb, 0, &imap, &nimaps, &free_list, NULL);
	if (error)
		goto error0;

	/*
	 * Complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
	if (error)
		goto error0;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
	if (error)
		goto error_out;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = ENOSPC;
		goto error_out;
	}

	if (unlikely(!imap.br_startblock && !(io->io_flags & XFS_IOCORE_RT))) {
		error = xfs_cmn_err_fsblock_zero(ip, &imap);
		goto error_out;
	}

	*ret_imap = imap;
	*nmaps = 1;
	return 0;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_bmap_cancel(&free_list);
	XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	*nmaps = 0;	/* nothing set-up here */

error_out:
	return XFS_ERROR(error);
}
/*
 * If the caller is doing a write at the end of the file,
 * then extend the allocation out to the file system's write
 * iosize. We clean up any extra space left over when the
 * file is closed in xfs_inactive().
 *
 * For sync writes, we are flushing delayed allocate space to
 * try to make additional space available for allocation near
 * the filesystem full boundary - preallocation hurts in that
 * situation, of course.
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_iocore_t	*io,
	xfs_fsize_t	isize,
	xfs_off_t	offset,
	size_t		count,
	int		ioflag,
	xfs_bmbt_irec_t *imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t	start_fsb;
	xfs_filblks_t	count_fsb;
	xfs_fsblock_t	firstblock;
	int		n, error, imaps;

	*prealloc = 0;
	if ((ioflag & BMAPI_SYNC) || (offset + count) <= isize)
		return 0;

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	while (count_fsb > 0) {
		imaps = nimaps;
		firstblock = NULLFSBLOCK;
		error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb, 0,
				  &firstblock, 0, imap, &imaps, NULL, NULL);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;
		}
	}
	*prealloc = 1;
	return 0;
}
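/*
 * Set up a delayed allocation for a buffered write, preallocating out
 * to the write iosize when appropriate.  On ENOSPC, flush delalloc data
 * via xfs_flush_space() and retry the allocation.
 */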
int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	int		ioflag,
	xfs_bmbt_irec_t *ret_imap,
	int		*nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_iocore_t	*io = &ip->i_iocore;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_fsblock_t	firstblock;
	xfs_extlen_t	extsz;
	xfs_fsize_t	isize;
	int		nimaps;
	xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
	int		prealloc, fsynced = 0;
	int		error;

	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED);
	if (error)
		return XFS_ERROR(error);

	if (XFS_IS_REALTIME_INODE(ip)) {
		if (!(extsz = ip->i_d.di_extsize))
			extsz = mp->m_sb.sb_rextsize;
	} else {
		extsz = ip->i_d.di_extsize;
	}

	offset_fsb = XFS_B_TO_FSBT(mp, offset);

retry:
	isize = ip->i_d.di_size;
	if (io->io_new_size > isize)
		isize = io->io_new_size;

	error = xfs_iomap_eof_want_preallocate(mp, io, isize, offset, count,
				ioflag, imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

	if (prealloc) {
		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + mp->m_writeio_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz,
							&last_fsb);
		if (error)
			return error;
	}

	nimaps = XFS_WRITE_IMAPS;
	firstblock = NULLFSBLOCK;
	error = XFS_BMAPI(mp, NULL, io, offset_fsb,
			  (xfs_filblks_t)(last_fsb - offset_fsb),
			  XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
			  XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
			  &nimaps, NULL, NULL);
	if (error && (error != ENOSPC))
		return XFS_ERROR(error);

	/*
	 * If bmapi returned us nothing, and if we didn't get back EDQUOT,
	 * then we must have run out of space - flush delalloc, and retry..
	 */
	if (nimaps == 0) {
		xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE,
					io, offset, count);
		if (xfs_flush_space(ip, &fsynced, &ioflag))
			return XFS_ERROR(ENOSPC);

		error = 0;
		goto retry;
	}

	if (unlikely(!imap[0].br_startblock && !(io->io_flags & XFS_IOCORE_RT)))
		return xfs_cmn_err_fsblock_zero(ip, &imap[0]);

	*ret_imap = imap[0];
	*nmaps = 1;

	return 0;
}
/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating callers request.
 *
 * Called without a lock on the inode.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *map,
	int		*retmap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_iocore_t	*io = &ip->i_iocore;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_bmbt_irec_t	imap[XFS_STRAT_WRITE_IMAPS];
	xfs_trans_t	*tp;
	int		i, nimaps, committed;
	int		error = 0;
	int		nres;

	*retmap = 0;

	/*
	 * Make sure that the dquots are there.
	 */
	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = map->br_blockcount;
	map_start_fsb = map->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, nres,
					XFS_WRITE_LOG_RES(mp),
					0, XFS_TRANS_PERM_LOG_RES,
					XFS_WRITE_LOG_COUNT);
			if (error == ENOSPC) {
				error = xfs_trans_reserve(tp, 0,
						XFS_WRITE_LOG_RES(mp),
						0,
						XFS_TRANS_PERM_LOG_RES,
						XFS_WRITE_LOG_COUNT);
			}
			if (error) {
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
			xfs_trans_ihold(tp, ip);

			XFS_BMAP_INIT(&free_list, &first_block);

			nimaps = XFS_STRAT_WRITE_IMAPS;
			/*
			 * Ensure we don't go beyond eof - it is possible
			 * the extents changed since we did the read call,
			 * we dropped the ilock in the interim.
			 */

			end_fsb = XFS_B_TO_FSB(mp, ip->i_d.di_size);
			xfs_bmap_last_offset(NULL, ip, &last_block,
				XFS_DATA_FORK);
			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = EAGAIN;
					goto trans_cancel;
				}
			}

			/* Go get the actual blocks */
			error = XFS_BMAPI(mp, tp, io, map_start_fsb, count_fsb,
					XFS_BMAPI_WRITE, &first_block, 1,
					imap, &nimaps, &free_list, NULL);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list,
					first_block, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp,
					XFS_TRANS_RELEASE_LOG_RES, NULL);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the callers request
		 */
		for (i = 0; i < nimaps; i++) {
			if (unlikely(!imap[i].br_startblock &&
				     !(io->io_flags & XFS_IOCORE_RT)))
				return xfs_cmn_err_fsblock_zero(ip, &imap[i]);
			if ((offset_fsb >= imap[i].br_startoff) &&
			    (offset_fsb < (imap[i].br_startoff +
					   imap[i].br_blockcount))) {
				*map = imap[i];
				*retmap = 1;
				XFS_STATS_INC(xs_xstrat_quick);
				return 0;
			}
			count_fsb -= imap[i].br_blockcount;
		}

		/* So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		nimaps--;
		map_start_fsb = imap[nimaps].br_startoff +
				imap[nimaps].br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}
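/*
 * Convert the unwritten extents covering the given range to written
 * (normal) extents, looping one transaction at a time until the whole
 * range has been converted.
 */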
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_iocore_t	*io = &ip->i_iocore;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	xfs_bmap_free_t free_list;
	uint		resblks;
	int		committed;
	int		error;

	xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN,
				&ip->i_iocore, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
		error = xfs_trans_reserve(tp, resblks,
				XFS_WRITE_LOG_RES(mp), 0,
				XFS_TRANS_PERM_LOG_RES,
				XFS_WRITE_LOG_COUNT);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return XFS_ERROR(error);
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
		xfs_trans_ihold(tp, ip);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		XFS_BMAP_INIT(&free_list, &firstfsb);
		nimaps = 1;
		error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb,
				  XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,
				  1, &imap, &nimaps, &free_list, NULL);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_bmap_finish(&(tp), &(free_list),
				firstfsb, &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return XFS_ERROR(error);

		if (unlikely(!imap.br_startblock &&
			     !(io->io_flags & XFS_IOCORE_RT)))
			return xfs_cmn_err_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}