xfs_iomap.c

/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
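
/*
 * XFS_WRITEIO_ALIGN() rounds a byte offset down to a multiple of the
 * mount's preferred write I/O size (1 << m_writeio_log bytes).
 */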
#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
#define XFS_STRAT_WRITE_IMAPS	2
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP
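
/*
 * Fill in an xfs_iomap_t from a single extent mapping: byte offset, length
 * and target device, plus flags marking holes, delayed allocations and
 * unwritten extents.
 */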
STATIC void
xfs_imap_to_bmap(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_bmbt_irec_t	*imap,
	xfs_iomap_t	*iomapp,
	int		imaps,			/* Number of imap entries */
	int		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fsblock_t	start_block;

	iomapp->iomap_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomapp->iomap_delta = offset - iomapp->iomap_offset;
	iomapp->iomap_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomapp->iomap_flags = flags;

	if (XFS_IS_REALTIME_INODE(ip)) {
		iomapp->iomap_flags |= IOMAP_REALTIME;
		iomapp->iomap_target = mp->m_rtdev_targp;
	} else {
		iomapp->iomap_target = mp->m_ddev_targp;
	}
	start_block = imap->br_startblock;
	if (start_block == HOLESTARTBLOCK) {
		iomapp->iomap_bn = IOMAP_DADDR_NULL;
		iomapp->iomap_flags |= IOMAP_HOLE;
	} else if (start_block == DELAYSTARTBLOCK) {
		iomapp->iomap_bn = IOMAP_DADDR_NULL;
		iomapp->iomap_flags |= IOMAP_DELAY;
	} else {
		iomapp->iomap_bn = xfs_fsb_to_db(ip, start_block);
		if (ISUNWRITTEN(imap))
			iomapp->iomap_flags |= IOMAP_UNWRITTEN;
	}
}
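
/*
 * Map a file byte range to disk blocks.  BMAPI_READ looks up an existing
 * mapping, BMAPI_WRITE allocates new space via the direct or delayed
 * allocation paths below, and BMAPI_ALLOCATE converts an existing delayed
 * allocation to real blocks.  The result is returned through
 * *iomapp/*niomaps.
 */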
int
xfs_iomap(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	ssize_t		count,
	int		flags,
	xfs_iomap_t	*iomapp,
	int		*niomaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, end_fsb;
	int		error = 0;
	int		lockmode = 0;
	xfs_bmbt_irec_t	imap;
	int		nimaps = 1;
	int		bmapi_flags = 0;
	int		iomap_flags = 0;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(niomaps && *niomaps == 1);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	trace_xfs_iomap_enter(ip, offset, count, flags, NULL);

	switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) {
	case BMAPI_READ:
		lockmode = xfs_ilock_map_shared(ip);
		bmapi_flags = XFS_BMAPI_ENTIRE;
		break;
	case BMAPI_WRITE:
		lockmode = XFS_ILOCK_EXCL;
		if (flags & BMAPI_IGNSTATE)
			bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
		xfs_ilock(ip, lockmode);
		break;
	case BMAPI_ALLOCATE:
		lockmode = XFS_ILOCK_SHARED;
		bmapi_flags = XFS_BMAPI_ENTIRE;

		/* Attempt non-blocking lock */
		if (flags & BMAPI_TRYLOCK) {
			if (!xfs_ilock_nowait(ip, lockmode))
				return XFS_ERROR(EAGAIN);
		} else {
			xfs_ilock(ip, lockmode);
		}
		break;
	default:
		BUG();
	}

	ASSERT(offset <= mp->m_maxioffset);
	if ((xfs_fsize_t)offset + count > mp->m_maxioffset)
		count = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi(NULL, ip, offset_fsb,
			(xfs_filblks_t)(end_fsb - offset_fsb),
			bmapi_flags, NULL, 0, &imap,
			&nimaps, NULL, NULL);
	if (error)
		goto out;

	switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
	case BMAPI_WRITE:
		/* If we found an extent, return it */
		if (nimaps &&
		    (imap.br_startblock != HOLESTARTBLOCK) &&
		    (imap.br_startblock != DELAYSTARTBLOCK)) {
			trace_xfs_iomap_found(ip, offset, count, flags, &imap);
			break;
		}

		if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) {
			error = xfs_iomap_write_direct(ip, offset, count, flags,
						       &imap, &nimaps, nimaps);
		} else {
			error = xfs_iomap_write_delay(ip, offset, count, flags,
						      &imap, &nimaps);
		}
		if (!error) {
			trace_xfs_iomap_alloc(ip, offset, count, flags, &imap);
		}
		iomap_flags = IOMAP_NEW;
		break;
	case BMAPI_ALLOCATE:
		/* If we found an extent, return it */
		xfs_iunlock(ip, lockmode);
		lockmode = 0;

		if (nimaps && !isnullstartblock(imap.br_startblock)) {
			trace_xfs_iomap_found(ip, offset, count, flags, &imap);
			break;
		}

		error = xfs_iomap_write_allocate(ip, offset, count,
						 &imap, &nimaps);
		break;
	}

	ASSERT(nimaps <= 1);

	if (nimaps)
		xfs_imap_to_bmap(ip, offset, &imap, iomapp, nimaps, iomap_flags);
	*niomaps = nimaps;

out:
	if (lockmode)
		xfs_iunlock(ip, lockmode);
	return XFS_ERROR(error);
}
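
/*
 * When allocating past EOF, round the requested last block up to a stripe
 * unit/width or extent size hint boundary.  The rounded value is only used
 * if it still lies beyond the current end of the allocated extents.
 */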
STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_fileoff_t	new_last_fsb = 0;
	xfs_extlen_t	align;
	int		eof, error;

	if (XFS_IS_REALTIME_INODE(ip))
		;
	/*
	 * If mounted with the "-o swalloc" option, round up the allocation
	 * request to a stripe width boundary if the file size is >=
	 * stripe width and we are allocating past the allocation eof.
	 */
	else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) &&
		 (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_swidth)))
		new_last_fsb = roundup_64(*last_fsb, mp->m_swidth);
	/*
	 * Round up the allocation request to a stripe unit (m_dalign) boundary
	 * if the file size is >= stripe unit size, and we are allocating past
	 * the allocation eof.
	 */
	else if (mp->m_dalign && (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_dalign)))
		new_last_fsb = roundup_64(*last_fsb, mp->m_dalign);

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when the file is on a real-time subvolume or has a di_extsize
	 * hint).
	 */
	if (extsize) {
		if (new_last_fsb)
			align = roundup_64(new_last_fsb, extsize);
		else
			align = extsize;
		new_last_fsb = roundup_64(*last_fsb, align);
	}

	if (new_last_fsb) {
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}
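
/*
 * A mapping on the data device should never start at block zero; callers
 * treat that as on-disk corruption.  Log an alert describing the bad extent
 * and return EFSCORRUPTED.
 */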
STATIC int
xfs_cmn_err_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x\n",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return EFSCORRUPTED;
}
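
/*
 * Allocate real disk blocks for a direct I/O or mmap write.  The block and
 * quota reservations are sized up front (aligned to the realtime extent or
 * extent size hint where needed), the ilock is cycled around the
 * transaction reservation, and the new mapping is handed back in *ret_imap.
 */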
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	int		flags,
	xfs_bmbt_irec_t	*ret_imap,
	int		*nmaps,
	int		found)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		bmapi_flag;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	xfs_bmap_free_t	free_list;
	uint		qblocks, resblks, resrtextents;
	int		committed;
	int		error;

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return XFS_ERROR(error);

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > ip->i_size) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			goto error_out;
	} else {
		if (found && (ret_imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					ret_imap->br_blockcount +
					ret_imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Allocate and set up the transaction
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, resblks,
			XFS_WRITE_LOG_RES(mp), resrtextents,
			XFS_TRANS_PERM_LOG_RES,
			XFS_WRITE_LOG_COUNT);
	/*
	 * Check for running out of space; note that we must retake the
	 * ilock before returning.
	 */
	if (error)
		xfs_trans_cancel(tp, 0);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (error)
		goto error_out;

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto error1;

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);

	bmapi_flag = XFS_BMAPI_WRITE;
	if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz))
		bmapi_flag |= XFS_BMAPI_PREALLOC;

	/*
	 * Issue the xfs_bmapi() call to allocate the blocks
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag,
		&firstfsb, 0, &imap, &nimaps, &free_list, NULL);
	if (error)
		goto error0;

	/*
	 * Complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto error0;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto error_out;

	/*
	 * Copy any maps to the caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = ENOSPC;
		goto error_out;
	}

	if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) {
		error = xfs_cmn_err_fsblock_zero(ip, &imap);
		goto error_out;
	}

	*ret_imap = imap;
	*nmaps = 1;
	return 0;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	*nmaps = 0;	/* nothing set up here */

error_out:
	return XFS_ERROR(error);
}

/*
 * If the caller is doing a write at the end of the file, then extend the
 * allocation out to the file system's write iosize. We clean up any extra
 * space left over when the file is closed in xfs_inactive().
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	int		ioflag,
	xfs_bmbt_irec_t	*imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t	start_fsb;
	xfs_filblks_t	count_fsb;
	xfs_fsblock_t	firstblock;
	int		n, error, imaps;

	*prealloc = 0;
	if ((offset + count) <= ip->i_size)
		return 0;

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	while (count_fsb > 0) {
		imaps = nimaps;
		firstblock = NULLFSBLOCK;
		error = xfs_bmapi(NULL, ip, start_fsb, count_fsb, 0,
				  &firstblock, 0, imap, &imaps, NULL, NULL);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;
		}
	}
	*prealloc = 1;
	return 0;
}
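
/*
 * Create a delayed allocation reservation for a buffered write, optionally
 * preallocating out past EOF to the mount's write iosize.  If the initial
 * attempt returns no mapping (out of space), flush other delalloc inodes
 * once and retry without the EOF preallocation.
 */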
int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	int		ioflag,
	xfs_bmbt_irec_t	*ret_imap,
	int		*nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_fsblock_t	firstblock;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t	imap[XFS_WRITE_IMAPS];
	int		prealloc, flushed = 0;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there. This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return XFS_ERROR(error);

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
				ioflag, imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + mp->m_writeio_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	nimaps = XFS_WRITE_IMAPS;
	firstblock = NULLFSBLOCK;
	error = xfs_bmapi(NULL, ip, offset_fsb,
			  (xfs_filblks_t)(last_fsb - offset_fsb),
			  XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
			  XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
			  &nimaps, NULL, NULL);
	if (error && (error != ENOSPC))
		return XFS_ERROR(error);

	/*
	 * If bmapi returned us nothing, and if we didn't get back EDQUOT,
	 * then we must have run out of space - flush all other inodes with
	 * delalloc blocks and retry without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (flushed)
			return XFS_ERROR(ENOSPC);

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_flush_inodes(ip);
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		flushed = 1;
		error = 0;
		prealloc = 0;
		goto retry;
	}

	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_cmn_err_fsblock_zero(ip, &imap[0]);

	*ret_imap = imap[0];
	*nmaps = 1;

	return 0;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*map,
	int		*retmap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_bmbt_irec_t	imap;
	xfs_trans_t	*tp;
	int		nimaps, committed;
	int		error = 0;
	int		nres;

	*retmap = 0;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = map->br_blockcount;
	map_start_fsb = map->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */
		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, nres,
					XFS_WRITE_LOG_RES(mp),
					0, XFS_TRANS_PERM_LOG_RES,
					XFS_WRITE_LOG_COUNT);
			if (error) {
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
			xfs_trans_ihold(tp, ip);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * It is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while.  We have to be careful about truncates or
			 * hole punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we look at for overlap with the
			 * desired range and abort as soon as we find it. Also,
			 * given that we only return a single map, having one
			 * beyond what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, ip->i_size);
			error = xfs_bmap_last_offset(NULL, ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = EAGAIN;
					goto trans_cancel;
				}
			}

			/* Go get the actual blocks */
			error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb,
					XFS_BMAPI_WRITE, &first_block, 1,
					&imap, &nimaps, &free_list, NULL);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request
		 */
		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_cmn_err_fsblock_zero(ip, &imap);

		if ((offset_fsb >= imap.br_startoff) &&
		    (offset_fsb < (imap.br_startoff +
				   imap.br_blockcount))) {
			*map = imap;
			*retmap = 1;
			XFS_STATS_INC(xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap.br_blockcount;
		map_start_fsb = imap.br_startoff + imap.br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}
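
/*
 * Convert the unwritten (preallocated) extents covering the given byte
 * range into normal written extents, one transaction at a time, looping
 * until the whole range has been converted.
 */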
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	xfs_bmap_free_t	free_list;
	uint		resblks;
	int		committed;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we open code the transaction allocation here
		 * to pass KM_NOFS: we can't risk recursing back into
		 * the filesystem here, as we might be asked to write out
		 * the same inode that we complete here and might deadlock
		 * on the iolock.
		 */
		xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
		tp->t_flags |= XFS_TRANS_RESERVE;
		error = xfs_trans_reserve(tp, resblks,
				XFS_WRITE_LOG_RES(mp), 0,
				XFS_TRANS_PERM_LOG_RES,
				XFS_WRITE_LOG_COUNT);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return XFS_ERROR(error);
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
		xfs_trans_ihold(tp, ip);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
				  XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,
				  1, &imap, &nimaps, &free_list, NULL);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return XFS_ERROR(error);

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_cmn_err_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}