- /*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
- #include "xfs.h"
- #include "xfs_bit.h"
- #include "xfs_log.h"
- #include "xfs_inum.h"
- #include "xfs_sb.h"
- #include "xfs_ag.h"
- #include "xfs_dir2.h"
- #include "xfs_trans.h"
- #include "xfs_dmapi.h"
- #include "xfs_mount.h"
- #include "xfs_bmap_btree.h"
- #include "xfs_alloc_btree.h"
- #include "xfs_ialloc_btree.h"
- #include "xfs_dir2_sf.h"
- #include "xfs_attr_sf.h"
- #include "xfs_dinode.h"
- #include "xfs_inode.h"
- #include "xfs_alloc.h"
- #include "xfs_btree.h"
- #include "xfs_error.h"
- #include "xfs_rw.h"
- #include "xfs_iomap.h"
- #include "xfs_vnodeops.h"
- #include <linux/mpage.h>
- #include <linux/pagevec.h>
- #include <linux/writeback.h>
- /*
- * Prime number of hash buckets since address is used as the key.
- */
- #define NVSYNC 37
- #define to_ioend_wq(v) (&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
- static wait_queue_head_t xfs_ioend_wq[NVSYNC];
- void __init
- xfs_ioend_init(void)
- {
- int i;
- for (i = 0; i < NVSYNC; i++)
- init_waitqueue_head(&xfs_ioend_wq[i]);
- }
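- /*
- * Wait for all pending I/O on an inode to complete. Unrelated inodes
- * may hash to the same wait queue head, so waiters can see spurious
- * wakeups; wait_event() rechecks i_iocount, which makes this harmless.
- */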
- void
- xfs_ioend_wait(
- xfs_inode_t *ip)
- {
- wait_queue_head_t *wq = to_ioend_wq(ip);
- wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
- }
- STATIC void
- xfs_ioend_wake(
- xfs_inode_t *ip)
- {
- if (atomic_dec_and_test(&ip->i_iocount))
- wake_up(to_ioend_wq(ip));
- }
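- /*
- * Classify the buffers on a page: note whether any are delalloc,
- * unmapped-but-uptodate, or unwritten. Callers use this to decide
- * whether a transaction would be needed to write the page out.
- */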
- STATIC void
- xfs_count_page_state(
- struct page *page,
- int *delalloc,
- int *unmapped,
- int *unwritten)
- {
- struct buffer_head *bh, *head;
- *delalloc = *unmapped = *unwritten = 0;
- bh = head = page_buffers(page);
- do {
- if (buffer_uptodate(bh) && !buffer_mapped(bh))
- (*unmapped) = 1;
- else if (buffer_unwritten(bh))
- (*unwritten) = 1;
- else if (buffer_delay(bh))
- (*delalloc) = 1;
- } while ((bh = bh->b_this_page) != head);
- }
- #if defined(XFS_RW_TRACE)
- void
- xfs_page_trace(
- int tag,
- struct inode *inode,
- struct page *page,
- unsigned long pgoff)
- {
- xfs_inode_t *ip;
- loff_t isize = i_size_read(inode);
- loff_t offset = page_offset(page);
- int delalloc = -1, unmapped = -1, unwritten = -1;
- if (page_has_buffers(page))
- xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
- ip = XFS_I(inode);
- if (!ip->i_rwtrace)
- return;
- ktrace_enter(ip->i_rwtrace,
- (void *)((unsigned long)tag),
- (void *)ip,
- (void *)inode,
- (void *)page,
- (void *)pgoff,
- (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
- (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
- (void *)((unsigned long)(isize & 0xffffffff)),
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)delalloc),
- (void *)((unsigned long)unmapped),
- (void *)((unsigned long)unwritten),
- (void *)((unsigned long)current_pid()),
- (void *)NULL);
- }
- #else
- #define xfs_page_trace(tag, inode, page, pgoff)
- #endif
- STATIC struct block_device *
- xfs_find_bdev_for_inode(
- struct xfs_inode *ip)
- {
- struct xfs_mount *mp = ip->i_mount;
- if (XFS_IS_REALTIME_INODE(ip))
- return mp->m_rtdev_targp->bt_bdev;
- else
- return mp->m_ddev_targp->bt_bdev;
- }
- /*
- * We're now finished for good with this ioend structure.
- * Update the page state via the associated buffer_heads,
- * release holds on the inode and bio, and finally free
- * up memory. Do not use the ioend after this.
- */
- STATIC void
- xfs_destroy_ioend(
- xfs_ioend_t *ioend)
- {
- struct buffer_head *bh, *next;
- struct xfs_inode *ip = XFS_I(ioend->io_inode);
- for (bh = ioend->io_buffer_head; bh; bh = next) {
- next = bh->b_private;
- bh->b_end_io(bh, !ioend->io_error);
- }
- /*
- * Volume managers supporting multiple paths can send back ENODEV
- * when the final path disappears. In this case continuing to fill
- * the page cache with dirty data which cannot be written out is
- * evil, so prevent that.
- */
- if (unlikely(ioend->io_error == -ENODEV)) {
- xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
- __FILE__, __LINE__);
- }
- xfs_ioend_wake(ip);
- mempool_free(ioend, xfs_ioend_pool);
- }
- /*
- * Update on-disk file size now that data has been written to disk.
- * The current in-memory file size is i_size. If a write is beyond
- * eof, i_new_size will be the intended file size until i_size is
- * updated. If this write does not extend all the way to the valid
- * file size then restrict this update to the end of the write.
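- *
- * For example: if i_size is 4096, i_new_size is 8192 and this ioend
- * covers offset 4096 with size 2048, then bsize is 6144 and di_size
- * is raised to 6144 only, as the write stopped short of i_new_size.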
- */
- STATIC void
- xfs_setfilesize(
- xfs_ioend_t *ioend)
- {
- xfs_inode_t *ip = XFS_I(ioend->io_inode);
- xfs_fsize_t isize;
- xfs_fsize_t bsize;
- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
- ASSERT(ioend->io_type != IOMAP_READ);
- if (unlikely(ioend->io_error))
- return;
- bsize = ioend->io_offset + ioend->io_size;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- isize = MAX(ip->i_size, ip->i_new_size);
- isize = MIN(isize, bsize);
- if (ip->i_d.di_size < isize) {
- ip->i_d.di_size = isize;
- ip->i_update_core = 1;
- ip->i_update_size = 1;
- xfs_mark_inode_dirty_sync(ip);
- }
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- }
- /*
- * Buffered IO write completion for delayed allocate extents.
- */
- STATIC void
- xfs_end_bio_delalloc(
- struct work_struct *work)
- {
- xfs_ioend_t *ioend =
- container_of(work, xfs_ioend_t, io_work);
- xfs_setfilesize(ioend);
- xfs_destroy_ioend(ioend);
- }
- /*
- * Buffered IO write completion for regular, written extents.
- */
- STATIC void
- xfs_end_bio_written(
- struct work_struct *work)
- {
- xfs_ioend_t *ioend =
- container_of(work, xfs_ioend_t, io_work);
- xfs_setfilesize(ioend);
- xfs_destroy_ioend(ioend);
- }
- /*
- * IO write completion for unwritten extents.
- *
- * Issue transactions to convert a buffer range from unwritten
- * to written extents.
- */
- STATIC void
- xfs_end_bio_unwritten(
- struct work_struct *work)
- {
- xfs_ioend_t *ioend =
- container_of(work, xfs_ioend_t, io_work);
- struct xfs_inode *ip = XFS_I(ioend->io_inode);
- xfs_off_t offset = ioend->io_offset;
- size_t size = ioend->io_size;
- if (likely(!ioend->io_error)) {
- if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- int error;
- error = xfs_iomap_write_unwritten(ip, offset, size);
- if (error)
- ioend->io_error = error;
- }
- xfs_setfilesize(ioend);
- }
- xfs_destroy_ioend(ioend);
- }
- /*
- * IO read completion for regular, written extents.
- */
- STATIC void
- xfs_end_bio_read(
- struct work_struct *work)
- {
- xfs_ioend_t *ioend =
- container_of(work, xfs_ioend_t, io_work);
- xfs_destroy_ioend(ioend);
- }
- /*
- * Schedule IO completion handling on an xfsdatad if this was
- * the final hold on this ioend. If we are asked to wait,
- * flush the workqueue.
- */
- STATIC void
- xfs_finish_ioend(
- xfs_ioend_t *ioend,
- int wait)
- {
- if (atomic_dec_and_test(&ioend->io_remaining)) {
- struct workqueue_struct *wq = xfsdatad_workqueue;
- if (ioend->io_work.func == xfs_end_bio_unwritten)
- wq = xfsconvertd_workqueue;
- queue_work(wq, &ioend->io_work);
- if (wait)
- flush_workqueue(wq);
- }
- }
- /*
- * Allocate and initialise an IO completion structure.
- * We need to track unwritten extent write completion here initially.
- * We'll need to extend this for updating the ondisk inode size later
- * (vs. incore size).
- */
- STATIC xfs_ioend_t *
- xfs_alloc_ioend(
- struct inode *inode,
- unsigned int type)
- {
- xfs_ioend_t *ioend;
- ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
- /*
- * Set the count to 1 initially, which will prevent the I/O
- * completion callback from running before we have started all
- * the I/O; the initial reference is dropped once submission of
- * the whole ioend is complete.
- */
- atomic_set(&ioend->io_remaining, 1);
- ioend->io_error = 0;
- ioend->io_list = NULL;
- ioend->io_type = type;
- ioend->io_inode = inode;
- ioend->io_buffer_head = NULL;
- ioend->io_buffer_tail = NULL;
- atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
- ioend->io_offset = 0;
- ioend->io_size = 0;
- if (type == IOMAP_UNWRITTEN)
- INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
- else if (type == IOMAP_DELAY)
- INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
- else if (type == IOMAP_READ)
- INIT_WORK(&ioend->io_work, xfs_end_bio_read);
- else
- INIT_WORK(&ioend->io_work, xfs_end_bio_written);
- return ioend;
- }
- STATIC int
- xfs_map_blocks(
- struct inode *inode,
- loff_t offset,
- ssize_t count,
- xfs_iomap_t *mapp,
- int flags)
- {
- int nmaps = 1;
- return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
- }
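- /*
- * Return whether the given file offset still falls inside the range
- * covered by the iomap obtained from an earlier xfs_map_blocks() call.
- */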
- STATIC_INLINE int
- xfs_iomap_valid(
- xfs_iomap_t *iomapp,
- loff_t offset)
- {
- return offset >= iomapp->iomap_offset &&
- offset < iomapp->iomap_offset + iomapp->iomap_bsize;
- }
- /*
- * BIO completion handler for buffered IO.
- */
- STATIC void
- xfs_end_bio(
- struct bio *bio,
- int error)
- {
- xfs_ioend_t *ioend = bio->bi_private;
- ASSERT(atomic_read(&bio->bi_cnt) >= 1);
- ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
- /* Toss bio and pass work off to an xfsdatad thread */
- bio->bi_private = NULL;
- bio->bi_end_io = NULL;
- bio_put(bio);
- xfs_finish_ioend(ioend, 0);
- }
- STATIC void
- xfs_submit_ioend_bio(
- xfs_ioend_t *ioend,
- struct bio *bio)
- {
- atomic_inc(&ioend->io_remaining);
- bio->bi_private = ioend;
- bio->bi_end_io = xfs_end_bio;
- submit_bio(WRITE, bio);
- ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
- bio_put(bio);
- }
- STATIC struct bio *
- xfs_alloc_ioend_bio(
- struct buffer_head *bh)
- {
- struct bio *bio;
- int nvecs = bio_get_nr_vecs(bh->b_bdev);
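- /*
- * Ask for enough vecs to cover as many pages as the device allows,
- * retrying with progressively fewer vecs if the allocation fails.
- */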
- do {
- bio = bio_alloc(GFP_NOIO, nvecs);
- nvecs >>= 1;
- } while (!bio);
- ASSERT(bio->bi_private == NULL);
- bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio->bi_bdev = bh->b_bdev;
- bio_get(bio);
- return bio;
- }
- STATIC void
- xfs_start_buffer_writeback(
- struct buffer_head *bh)
- {
- ASSERT(buffer_mapped(bh));
- ASSERT(buffer_locked(bh));
- ASSERT(!buffer_delay(bh));
- ASSERT(!buffer_unwritten(bh));
- mark_buffer_async_write(bh);
- set_buffer_uptodate(bh);
- clear_buffer_dirty(bh);
- }
- STATIC void
- xfs_start_page_writeback(
- struct page *page,
- int clear_dirty,
- int buffers)
- {
- ASSERT(PageLocked(page));
- ASSERT(!PageWriteback(page));
- if (clear_dirty)
- clear_page_dirty_for_io(page);
- set_page_writeback(page);
- unlock_page(page);
- /* If no buffers on the page are to be written, finish it here */
- if (!buffers)
- end_page_writeback(page);
- }
- static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
- {
- return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
- }
- /*
- * Submit all of the bios for all of the ioends we have saved up, covering the
- * initial writepage page and also any probed pages.
- *
- * Because we may have multiple ioends spanning a page, we need to start
- * writeback on all the buffers before we submit them for I/O. If we mark the
- * buffers as we go, we can end up with a page that only has some of its
- * buffers marked async write, and I/O completion on those can occur before
- * we mark the remaining buffers async write.
- *
- * The end result of this is that we trip a bug in end_page_writeback() because
- * we call it twice for the one page as the code in end_buffer_async_write()
- * assumes that all buffers on the page are started at the same time.
- *
- * The fix is two passes across the ioend list - one to start writeback on the
- * buffer_heads, and then submit them for I/O on the second pass.
- */
- STATIC void
- xfs_submit_ioend(
- xfs_ioend_t *ioend)
- {
- xfs_ioend_t *head = ioend;
- xfs_ioend_t *next;
- struct buffer_head *bh;
- struct bio *bio;
- sector_t lastblock = 0;
- /* Pass 1 - start writeback */
- do {
- next = ioend->io_list;
- for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
- xfs_start_buffer_writeback(bh);
- }
- } while ((ioend = next) != NULL);
- /* Pass 2 - submit I/O */
- ioend = head;
- do {
- next = ioend->io_list;
- bio = NULL;
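- /*
- * Build bios from runs of disk-contiguous buffers, starting a
- * new bio whenever contiguity breaks or the current bio is full.
- */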
- for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
- if (!bio) {
- retry:
- bio = xfs_alloc_ioend_bio(bh);
- } else if (bh->b_blocknr != lastblock + 1) {
- xfs_submit_ioend_bio(ioend, bio);
- goto retry;
- }
- if (bio_add_buffer(bio, bh) != bh->b_size) {
- xfs_submit_ioend_bio(ioend, bio);
- goto retry;
- }
- lastblock = bh->b_blocknr;
- }
- if (bio)
- xfs_submit_ioend_bio(ioend, bio);
- xfs_finish_ioend(ioend, 0);
- } while ((ioend = next) != NULL);
- }
- /*
- * Cancel submission of all buffer_heads so far in this ioend.
- * Toss the ioend too. Only ever called for the initial page
- * in a writepage request, so only ever one page.
- */
- STATIC void
- xfs_cancel_ioend(
- xfs_ioend_t *ioend)
- {
- xfs_ioend_t *next;
- struct buffer_head *bh, *next_bh;
- do {
- next = ioend->io_list;
- bh = ioend->io_buffer_head;
- do {
- next_bh = bh->b_private;
- clear_buffer_async_write(bh);
- unlock_buffer(bh);
- } while ((bh = next_bh) != NULL);
- xfs_ioend_wake(XFS_I(ioend->io_inode));
- mempool_free(ioend, xfs_ioend_pool);
- } while ((ioend = next) != NULL);
- }
- /*
- * Test to see if we've been building up a completion structure for
- * earlier buffers -- if so, we try to append to this ioend if we
- * can, otherwise we finish off any current ioend and start another.
- * New ioends are linked onto the previous ioend's io_list and handed
- * back to the caller through *result.
- */
- STATIC void
- xfs_add_to_ioend(
- struct inode *inode,
- struct buffer_head *bh,
- xfs_off_t offset,
- unsigned int type,
- xfs_ioend_t **result,
- int need_ioend)
- {
- xfs_ioend_t *ioend = *result;
- if (!ioend || need_ioend || type != ioend->io_type) {
- xfs_ioend_t *previous = *result;
- ioend = xfs_alloc_ioend(inode, type);
- ioend->io_offset = offset;
- ioend->io_buffer_head = bh;
- ioend->io_buffer_tail = bh;
- if (previous)
- previous->io_list = ioend;
- *result = ioend;
- } else {
- ioend->io_buffer_tail->b_private = bh;
- ioend->io_buffer_tail = bh;
- }
- bh->b_private = NULL;
- ioend->io_size += bh->b_size;
- }
- STATIC void
- xfs_map_buffer(
- struct buffer_head *bh,
- xfs_iomap_t *mp,
- xfs_off_t offset,
- uint block_bits)
- {
- sector_t bn;
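- /*
- * iomap_bn is a disk address in 512 byte basic blocks; convert it
- * to filesystem block units, then add the block offset of 'offset'
- * within the mapping to get the buffer's block number.
- */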
- ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);
- bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
- ((offset - mp->iomap_offset) >> block_bits);
- ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));
- bh->b_blocknr = bn;
- set_buffer_mapped(bh);
- }
- STATIC void
- xfs_map_at_offset(
- struct buffer_head *bh,
- loff_t offset,
- int block_bits,
- xfs_iomap_t *iomapp)
- {
- ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
- ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
- lock_buffer(bh);
- xfs_map_buffer(bh, iomapp, offset, block_bits);
- bh->b_bdev = iomapp->iomap_target->bt_bdev;
- set_buffer_mapped(bh);
- clear_buffer_delay(bh);
- clear_buffer_unwritten(bh);
- }
- /*
- * Look for a page at index that is suitable for clustering.
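- * Returns the number of bytes on the page eligible for clustered
- * writeout, or 0 if the page is unsuitable.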
- */
- STATIC unsigned int
- xfs_probe_page(
- struct page *page,
- unsigned int pg_offset,
- int mapped)
- {
- int ret = 0;
- if (PageWriteback(page))
- return 0;
- if (page->mapping && PageDirty(page)) {
- if (page_has_buffers(page)) {
- struct buffer_head *bh, *head;
- bh = head = page_buffers(page);
- do {
- if (!buffer_uptodate(bh))
- break;
- if (mapped != buffer_mapped(bh))
- break;
- ret += bh->b_size;
- if (ret >= pg_offset)
- break;
- } while ((bh = bh->b_this_page) != head);
- } else
- ret = mapped ? 0 : PAGE_CACHE_SIZE;
- }
- return ret;
- }
- STATIC size_t
- xfs_probe_cluster(
- struct inode *inode,
- struct page *startpage,
- struct buffer_head *bh,
- struct buffer_head *head,
- int mapped)
- {
- struct pagevec pvec;
- pgoff_t tindex, tlast, tloff;
- size_t total = 0;
- int done = 0, i;
- /* First sum forwards in this page */
- do {
- if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
- return total;
- total += bh->b_size;
- } while ((bh = bh->b_this_page) != head);
- /* if we reached the end of the page, sum forwards in following pages */
- tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
- tindex = startpage->index + 1;
- /* Prune this back to avoid pathological behavior */
- tloff = min(tlast, startpage->index + 64);
- pagevec_init(&pvec, 0);
- while (!done && tindex <= tloff) {
- unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
- if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
- break;
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
- size_t pg_offset, pg_len = 0;
- if (tindex == tlast) {
- pg_offset =
- i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
- if (!pg_offset) {
- done = 1;
- break;
- }
- } else
- pg_offset = PAGE_CACHE_SIZE;
- if (page->index == tindex && trylock_page(page)) {
- pg_len = xfs_probe_page(page, pg_offset, mapped);
- unlock_page(page);
- }
- if (!pg_len) {
- done = 1;
- break;
- }
- total += pg_len;
- tindex++;
- }
- pagevec_release(&pvec);
- cond_resched();
- }
- return total;
- }
- /*
- * Test if a given page is suitable for writing as part of an unwritten
- * or delayed allocate extent.
- */
- STATIC int
- xfs_is_delayed_page(
- struct page *page,
- unsigned int type)
- {
- if (PageWriteback(page))
- return 0;
- if (page->mapping && page_has_buffers(page)) {
- struct buffer_head *bh, *head;
- int acceptable = 0;
- bh = head = page_buffers(page);
- do {
- if (buffer_unwritten(bh))
- acceptable = (type == IOMAP_UNWRITTEN);
- else if (buffer_delay(bh))
- acceptable = (type == IOMAP_DELAY);
- else if (buffer_dirty(bh) && buffer_mapped(bh))
- acceptable = (type == IOMAP_NEW);
- else
- break;
- } while ((bh = bh->b_this_page) != head);
- if (acceptable)
- return 1;
- }
- return 0;
- }
- /*
- * Allocate & map buffers for page given the extent map, then write it out.
- * Except for the original page of a writepage, this is called on
- * delalloc/unwritten pages only; for the original page it is possible
- * that the page has no mapping at all.
- */
- STATIC int
- xfs_convert_page(
- struct inode *inode,
- struct page *page,
- loff_t tindex,
- xfs_iomap_t *mp,
- xfs_ioend_t **ioendp,
- struct writeback_control *wbc,
- int startio,
- int all_bh)
- {
- struct buffer_head *bh, *head;
- xfs_off_t end_offset;
- unsigned long p_offset;
- unsigned int type;
- int bbits = inode->i_blkbits;
- int len, page_dirty;
- int count = 0, done = 0, uptodate = 1;
- xfs_off_t offset = page_offset(page);
- if (page->index != tindex)
- goto fail;
- if (!trylock_page(page))
- goto fail;
- if (PageWriteback(page))
- goto fail_unlock_page;
- if (page->mapping != inode->i_mapping)
- goto fail_unlock_page;
- if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
- goto fail_unlock_page;
- /*
- * page_dirty is initially a count of buffers on the page before
- * EOF and is decremented as we move each into a cleanable state.
- *
- * Derivation:
- *
- * End offset is the highest offset that this page should represent.
- * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
- * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
- * hence give us the correct page_dirty count. On any other page,
- * it will be zero and in that case we need page_dirty to be the
- * count of buffers on the page.
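- *
- * For example, with 4k pages and 512 byte blocks: if i_size ends at
- * byte 1300 of the last page, p_offset rounds up to 1536 and
- * page_dirty starts at 3, the number of buffers covering valid data.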
- */
- end_offset = min_t(unsigned long long,
- (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
- i_size_read(inode));
- len = 1 << inode->i_blkbits;
- p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
- PAGE_CACHE_SIZE);
- p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
- page_dirty = p_offset / len;
- bh = head = page_buffers(page);
- do {
- if (offset >= end_offset)
- break;
- if (!buffer_uptodate(bh))
- uptodate = 0;
- if (!(PageUptodate(page) || buffer_uptodate(bh))) {
- done = 1;
- continue;
- }
- if (buffer_unwritten(bh) || buffer_delay(bh)) {
- if (buffer_unwritten(bh))
- type = IOMAP_UNWRITTEN;
- else
- type = IOMAP_DELAY;
- if (!xfs_iomap_valid(mp, offset)) {
- done = 1;
- continue;
- }
- ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
- ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
- xfs_map_at_offset(bh, offset, bbits, mp);
- if (startio) {
- xfs_add_to_ioend(inode, bh, offset,
- type, ioendp, done);
- } else {
- set_buffer_dirty(bh);
- unlock_buffer(bh);
- mark_buffer_dirty(bh);
- }
- page_dirty--;
- count++;
- } else {
- type = IOMAP_NEW;
- if (buffer_mapped(bh) && all_bh && startio) {
- lock_buffer(bh);
- xfs_add_to_ioend(inode, bh, offset,
- type, ioendp, done);
- count++;
- page_dirty--;
- } else {
- done = 1;
- }
- }
- } while (offset += len, (bh = bh->b_this_page) != head);
- if (uptodate && bh == head)
- SetPageUptodate(page);
- if (startio) {
- if (count) {
- struct backing_dev_info *bdi;
- bdi = inode->i_mapping->backing_dev_info;
- wbc->nr_to_write--;
- if (bdi_write_congested(bdi)) {
- wbc->encountered_congestion = 1;
- done = 1;
- } else if (wbc->nr_to_write <= 0) {
- done = 1;
- }
- }
- xfs_start_page_writeback(page, !page_dirty, count);
- }
- return done;
- fail_unlock_page:
- unlock_page(page);
- fail:
- return 1;
- }
- /*
- * Convert & write out a cluster of pages in the same extent as defined
- * by mp and following the start page.
- */
- STATIC void
- xfs_cluster_write(
- struct inode *inode,
- pgoff_t tindex,
- xfs_iomap_t *iomapp,
- xfs_ioend_t **ioendp,
- struct writeback_control *wbc,
- int startio,
- int all_bh,
- pgoff_t tlast)
- {
- struct pagevec pvec;
- int done = 0, i;
- pagevec_init(&pvec, 0);
- while (!done && tindex <= tlast) {
- unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
- if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
- break;
- for (i = 0; i < pagevec_count(&pvec); i++) {
- done = xfs_convert_page(inode, pvec.pages[i], tindex++,
- iomapp, ioendp, wbc, startio, all_bh);
- if (done)
- break;
- }
- pagevec_release(&pvec);
- cond_resched();
- }
- }
- /*
- * Calling this without startio set means we are being asked to make a dirty
- * page ready for freeing its buffers. When called with startio set then
- * we are coming from writepage.
- *
- * When called with startio set it is important that we write the WHOLE
- * page if possible.
- * The bh->b_state flags cannot tell us whether any particular block is
- * dirty due to mmap writes, and therefore a buffer's uptodate flag is
- * only valid if the page itself isn't completely uptodate. Some layers
- * may clear the page dirty flag prior to calling writepage, under the
- * assumption the entire page will be written out; by not writing out the
- * whole page the page can be reused before all valid dirty data is
- * written out. Note: in the case of a page that has been dirtied by
- * mmap writes but only partially set up by block_prepare_write(), the
- * buffer states will not agree, and only the buffers set up by
- * block_prepare_write()/block_commit_write() will have valid state;
- * thus the whole page must be written out.
- */
- STATIC int
- xfs_page_state_convert(
- struct inode *inode,
- struct page *page,
- struct writeback_control *wbc,
- int startio,
- int unmapped) /* also implies page uptodate */
- {
- struct buffer_head *bh, *head;
- xfs_iomap_t iomap;
- xfs_ioend_t *ioend = NULL, *iohead = NULL;
- loff_t offset;
- unsigned long p_offset = 0;
- unsigned int type;
- __uint64_t end_offset;
- pgoff_t end_index, last_index, tlast;
- ssize_t size, len;
- int flags, err, iomap_valid = 0, uptodate = 1;
- int page_dirty, count = 0;
- int trylock = 0;
- int all_bh = unmapped;
- if (startio) {
- if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
- trylock |= BMAPI_TRYLOCK;
- }
- /* Is this page beyond the end of the file? */
- offset = i_size_read(inode);
- end_index = offset >> PAGE_CACHE_SHIFT;
- last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
- if (page->index >= end_index) {
- if ((page->index >= end_index + 1) ||
- !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
- if (startio)
- unlock_page(page);
- return 0;
- }
- }
- /*
- * page_dirty is initially a count of buffers on the page before
- * EOF and is decremented as we move each into a cleanable state.
- *
- * Derivation:
- *
- * End offset is the highest offset that this page should represent.
- * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
- * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
- * hence give us the correct page_dirty count. On any other page,
- * it will be zero and in that case we need page_dirty to be the
- * count of buffers on the page.
- */
- end_offset = min_t(unsigned long long,
- (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
- len = 1 << inode->i_blkbits;
- p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
- PAGE_CACHE_SIZE);
- p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
- page_dirty = p_offset / len;
- bh = head = page_buffers(page);
- offset = page_offset(page);
- flags = BMAPI_READ;
- type = IOMAP_NEW;
- /* TODO: cleanup count and page_dirty */
- do {
- if (offset >= end_offset)
- break;
- if (!buffer_uptodate(bh))
- uptodate = 0;
- if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
- /*
- * The iomap is actually still valid, but the ioend
- * isn't. This shouldn't happen too often.
- */
- iomap_valid = 0;
- continue;
- }
- if (iomap_valid)
- iomap_valid = xfs_iomap_valid(&iomap, offset);
- /*
- * First case, map an unwritten extent and prepare for
- * extent state conversion transaction on completion.
- *
- * Second case, allocate space for a delalloc buffer.
- * We can return EAGAIN here in the release page case.
- *
- * Third case, an unmapped buffer was found, and we are
- * in a path where we need to write the whole page out.
- */
- if (buffer_unwritten(bh) || buffer_delay(bh) ||
- ((buffer_uptodate(bh) || PageUptodate(page)) &&
- !buffer_mapped(bh) && (unmapped || startio))) {
- int new_ioend = 0;
- /*
- * Make sure we don't use a read-only iomap
- */
- if (flags == BMAPI_READ)
- iomap_valid = 0;
- if (buffer_unwritten(bh)) {
- type = IOMAP_UNWRITTEN;
- flags = BMAPI_WRITE | BMAPI_IGNSTATE;
- } else if (buffer_delay(bh)) {
- type = IOMAP_DELAY;
- flags = BMAPI_ALLOCATE | trylock;
- } else {
- type = IOMAP_NEW;
- flags = BMAPI_WRITE | BMAPI_MMAP;
- }
- if (!iomap_valid) {
- /*
- * if we didn't have a valid mapping then we
- * need to ensure that we put the new mapping
- * in a new ioend structure. This needs to be
- * done to ensure that the ioends correctly
- * reflect the block mappings at io completion
- * for unwritten extent conversion.
- */
- new_ioend = 1;
- if (type == IOMAP_NEW) {
- size = xfs_probe_cluster(inode,
- page, bh, head, 0);
- } else {
- size = len;
- }
- err = xfs_map_blocks(inode, offset, size,
- &iomap, flags);
- if (err)
- goto error;
- iomap_valid = xfs_iomap_valid(&iomap, offset);
- }
- if (iomap_valid) {
- xfs_map_at_offset(bh, offset,
- inode->i_blkbits, &iomap);
- if (startio) {
- xfs_add_to_ioend(inode, bh, offset,
- type, &ioend,
- new_ioend);
- } else {
- set_buffer_dirty(bh);
- unlock_buffer(bh);
- mark_buffer_dirty(bh);
- }
- page_dirty--;
- count++;
- }
- } else if (buffer_uptodate(bh) && startio) {
- /*
- * we got here because the buffer is already mapped.
- * That means it must already have extents allocated
- * underneath it. Map the extent by reading it.
- */
- if (!iomap_valid || flags != BMAPI_READ) {
- flags = BMAPI_READ;
- size = xfs_probe_cluster(inode, page, bh,
- head, 1);
- err = xfs_map_blocks(inode, offset, size,
- &iomap, flags);
- if (err)
- goto error;
- iomap_valid = xfs_iomap_valid(&iomap, offset);
- }
- /*
- * We set the type to IOMAP_NEW in case we are doing a
- * small write at EOF that is extending the file but
- * without needing an allocation. We need to update the
- * file size on I/O completion in this case so it is
- * the same case as having just allocated a new extent
- * that we are writing into for the first time.
- */
- type = IOMAP_NEW;
- if (trylock_buffer(bh)) {
- ASSERT(buffer_mapped(bh));
- if (iomap_valid)
- all_bh = 1;
- xfs_add_to_ioend(inode, bh, offset, type,
- &ioend, !iomap_valid);
- page_dirty--;
- count++;
- } else {
- iomap_valid = 0;
- }
- } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
- (unmapped || startio)) {
- iomap_valid = 0;
- }
- if (!iohead)
- iohead = ioend;
- } while (offset += len, ((bh = bh->b_this_page) != head));
- if (uptodate && bh == head)
- SetPageUptodate(page);
- if (startio)
- xfs_start_page_writeback(page, 1, count);
- if (ioend && iomap_valid) {
- offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
- PAGE_CACHE_SHIFT;
- tlast = min_t(pgoff_t, offset, last_index);
- xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
- wbc, startio, all_bh, tlast);
- }
- if (iohead)
- xfs_submit_ioend(iohead);
- return page_dirty;
- error:
- if (iohead)
- xfs_cancel_ioend(iohead);
- /*
- * If it's delalloc and we have nowhere to put it,
- * throw it away, unless the lower layers told
- * us to try again.
- */
- if (err != -EAGAIN) {
- if (!unmapped)
- block_invalidatepage(page, 0);
- ClearPageUptodate(page);
- }
- return err;
- }
- /*
- * writepage: Called from one of two places:
- *
- * 1. we are flushing a delalloc buffer head.
- *
- * 2. we are writing out a dirty page. Typically the page dirty
- * state is cleared before we get here. In this case it is
- * conceivable we have no buffer heads.
- *
- * For delalloc space on the page we need to allocate space and
- * flush it. For unmapped buffer heads on the page we should
- * allocate space if the page is uptodate. For any other dirty
- * buffer heads on the page we should flush them.
- *
- * If we detect that a transaction would be required to flush
- * the page, we have to check the process flags first, if we
- * are already in a transaction or disk I/O during allocations
- * is off, we need to fail the writepage and redirty the page.
- */
- STATIC int
- xfs_vm_writepage(
- struct page *page,
- struct writeback_control *wbc)
- {
- int error;
- int need_trans;
- int delalloc, unmapped, unwritten;
- struct inode *inode = page->mapping->host;
- xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
- /*
- * We need a transaction if:
- * 1. There are delalloc buffers on the page
- * 2. The page is uptodate and we have unmapped buffers
- * 3. The page is uptodate and we have no buffers
- * 4. There are unwritten buffers on the page
- */
- if (!page_has_buffers(page)) {
- unmapped = 1;
- need_trans = 1;
- } else {
- xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
- if (!PageUptodate(page))
- unmapped = 0;
- need_trans = delalloc + unmapped + unwritten;
- }
- /*
- * If we need a transaction and the process flags say
- * we are already in a transaction, or no IO is allowed
- * then mark the page dirty again and leave the page
- * as is.
- */
- if (current_test_flags(PF_FSTRANS) && need_trans)
- goto out_fail;
- /*
- * Delay hooking up buffer heads until we have
- * made our go/no-go decision.
- */
- if (!page_has_buffers(page))
- create_empty_buffers(page, 1 << inode->i_blkbits, 0);
- /*
- * Convert delayed allocate, unwritten or unmapped space
- * to real space and flush out to disk.
- */
- error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
- if (error == -EAGAIN)
- goto out_fail;
- if (unlikely(error < 0))
- goto out_unlock;
- return 0;
- out_fail:
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
- out_unlock:
- unlock_page(page);
- return error;
- }
- STATIC int
- xfs_vm_writepages(
- struct address_space *mapping,
- struct writeback_control *wbc)
- {
- xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
- return generic_writepages(mapping, wbc);
- }
- /*
- * Called to move a page into cleanable state - and from there
- * to be released. Possibly the page is already clean. We always
- * have buffer heads in this call.
- *
- * Returns non-zero if the page was released, 0 if it could not be.
- *
- * Possible scenarios are:
- *
- * 1. We are being called to release a page which has been written
- * to via regular I/O. Buffer heads will be dirty and possibly
- * delalloc. If there are no delalloc buffer heads in this case
- * then we can just try to free the buffers.
- *
- * 2. We are called to release a page which has been written via
- * mmap. All we need to do is ensure there is no delalloc
- * state in the buffer heads: if there is none we can let the
- * caller free them, otherwise we will come back to the page
- * later via writepage.
- */
- STATIC int
- xfs_vm_releasepage(
- struct page *page,
- gfp_t gfp_mask)
- {
- struct inode *inode = page->mapping->host;
- int dirty, delalloc, unmapped, unwritten;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = 1,
- };
- xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
- if (!page_has_buffers(page))
- return 0;
- xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
- if (!delalloc && !unwritten)
- goto free_buffers;
- if (!(gfp_mask & __GFP_FS))
- return 0;
- /*
- * If we are already inside a transaction or the thread cannot
- * do I/O, we cannot release this page.
- */
- if (current_test_flags(PF_FSTRANS))
- return 0;
- /*
- * Convert delalloc space to real space, do not flush the
- * data out to disk, that will be done by the caller.
- * Never need to allocate space here - we will always
- * come back to writepage in that case.
- */
- dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
- if (dirty == 0 && !unwritten)
- goto free_buffers;
- return 0;
- free_buffers:
- return try_to_free_buffers(page);
- }
- STATIC int
- __xfs_get_blocks(
- struct inode *inode,
- sector_t iblock,
- struct buffer_head *bh_result,
- int create,
- int direct,
- bmapi_flags_t flags)
- {
- xfs_iomap_t iomap;
- xfs_off_t offset;
- ssize_t size;
- int niomap = 1;
- int error;
- offset = (xfs_off_t)iblock << inode->i_blkbits;
- ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
- size = bh_result->b_size;
- if (!create && direct && offset >= i_size_read(inode))
- return 0;
- error = xfs_iomap(XFS_I(inode), offset, size,
- create ? flags : BMAPI_READ, &iomap, &niomap);
- if (error)
- return -error;
- if (niomap == 0)
- return 0;
- if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
- /*
- * For unwritten extents do not report a disk address on
- * the read case (treat as if we're reading into a hole).
- */
- if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
- xfs_map_buffer(bh_result, &iomap, offset,
- inode->i_blkbits);
- }
- if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
- if (direct)
- bh_result->b_private = inode;
- set_buffer_unwritten(bh_result);
- }
- }
- /*
- * If this is a realtime file, data may be on a different device
- * to the one currently pointed at by the buffer_head's b_bdev.
- */
- bh_result->b_bdev = iomap.iomap_target->bt_bdev;
- /*
- * If we previously allocated a block out beyond eof and we are now
- * coming back to use it then we will need to flag it as new even if it
- * has a disk address.
- *
- * With sub-block writes into unwritten extents we also need to mark
- * the buffer as new so that the unwritten parts of the buffer gets
- * correctly zeroed.
- */
- if (create &&
- ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
- (offset >= i_size_read(inode)) ||
- (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
- set_buffer_new(bh_result);
- if (iomap.iomap_flags & IOMAP_DELAY) {
- BUG_ON(direct);
- if (create) {
- set_buffer_uptodate(bh_result);
- set_buffer_mapped(bh_result);
- set_buffer_delay(bh_result);
- }
- }
- if (direct || size > (1 << inode->i_blkbits)) {
- ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
- offset = min_t(xfs_off_t,
- iomap.iomap_bsize - iomap.iomap_delta, size);
- bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
- }
- return 0;
- }
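- /*
- * get_blocks callbacks for the buffered write and direct I/O paths;
- * the direct variant additionally sets the direct flag and BMAPI_DIRECT.
- */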
- int
- xfs_get_blocks(
- struct inode *inode,
- sector_t iblock,
- struct buffer_head *bh_result,
- int create)
- {
- return __xfs_get_blocks(inode, iblock,
- bh_result, create, 0, BMAPI_WRITE);
- }
- STATIC int
- xfs_get_blocks_direct(
- struct inode *inode,
- sector_t iblock,
- struct buffer_head *bh_result,
- int create)
- {
- return __xfs_get_blocks(inode, iblock,
- bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
- }
- STATIC void
- xfs_end_io_direct(
- struct kiocb *iocb,
- loff_t offset,
- ssize_t size,
- void *private)
- {
- xfs_ioend_t *ioend = iocb->private;
- /*
- * Non-NULL private data means we need to issue a transaction to
- * convert a range from unwritten to written extents. This needs
- * to happen from process context but aio+dio I/O completion
- * happens from irq context so we need to defer it to a workqueue.
- * This is not necessary for synchronous direct I/O, but we do
- * it anyway to keep the code uniform and simpler.
- *
- * Well, if only it were that simple. Because synchronous direct I/O
- * requires extent conversion to occur *before* we return to userspace,
- * we have to wait for extent conversion to complete. Look at the
- * iocb that has been passed to us to determine if this is AIO or
- * not. If it is synchronous, tell xfs_finish_ioend() to kick the
- * workqueue and wait for it to complete.
- *
- * The core direct I/O code might be changed to always call the
- * completion handler in the future, in which case all this can
- * go away.
- */
- ioend->io_offset = offset;
- ioend->io_size = size;
- if (ioend->io_type == IOMAP_READ) {
- xfs_finish_ioend(ioend, 0);
- } else if (private && size > 0) {
- xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
- } else {
- /*
- * A direct I/O write ioend starts its life in unwritten
- * state in case it maps an unwritten extent. This write
- * didn't map an unwritten extent, so switch its completion
- * handler.
- */
- INIT_WORK(&ioend->io_work, xfs_end_bio_written);
- xfs_finish_ioend(ioend, 0);
- }
- /*
- * blockdev_direct_IO can return an error even after the I/O
- * completion handler was called. Thus we need to protect
- * against double-freeing.
- */
- iocb->private = NULL;
- }
- STATIC ssize_t
- xfs_vm_direct_IO(
- int rw,
- struct kiocb *iocb,
- const struct iovec *iov,
- loff_t offset,
- unsigned long nr_segs)
- {
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- struct block_device *bdev;
- ssize_t ret;
- bdev = xfs_find_bdev_for_inode(XFS_I(inode));
- if (rw == WRITE) {
- iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
- ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
- bdev, iov, offset, nr_segs,
- xfs_get_blocks_direct,
- xfs_end_io_direct);
- } else {
- iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
- ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
- bdev, iov, offset, nr_segs,
- xfs_get_blocks_direct,
- xfs_end_io_direct);
- }
- if (unlikely(ret != -EIOCBQUEUED && iocb->private))
- xfs_destroy_ioend(iocb->private);
- return ret;
- }
- STATIC int
- xfs_vm_write_begin(
- struct file *file,
- struct address_space *mapping,
- loff_t pos,
- unsigned len,
- unsigned flags,
- struct page **pagep,
- void **fsdata)
- {
- *pagep = NULL;
- return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- xfs_get_blocks);
- }
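- /*
- * ->bmap is used by the FIBMAP ioctl and the swapfile code. Flush
- * delalloc data first so the block mapping we report is backed by
- * real extents on disk.
- */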
- STATIC sector_t
- xfs_vm_bmap(
- struct address_space *mapping,
- sector_t block)
- {
- struct inode *inode = (struct inode *)mapping->host;
- struct xfs_inode *ip = XFS_I(inode);
- xfs_itrace_entry(XFS_I(inode));
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
- xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- return generic_block_bmap(mapping, block, xfs_get_blocks);
- }
- STATIC int
- xfs_vm_readpage(
- struct file *unused,
- struct page *page)
- {
- return mpage_readpage(page, xfs_get_blocks);
- }
- STATIC int
- xfs_vm_readpages(
- struct file *unused,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned nr_pages)
- {
- return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
- }
- STATIC void
- xfs_vm_invalidatepage(
- struct page *page,
- unsigned long offset)
- {
- xfs_page_trace(XFS_INVALIDPAGE_ENTER,
- page->mapping->host, page, offset);
- block_invalidatepage(page, offset);
- }
- const struct address_space_operations xfs_address_space_operations = {
- .readpage = xfs_vm_readpage,
- .readpages = xfs_vm_readpages,
- .writepage = xfs_vm_writepage,
- .writepages = xfs_vm_writepages,
- .sync_page = block_sync_page,
- .releasepage = xfs_vm_releasepage,
- .invalidatepage = xfs_vm_invalidatepage,
- .write_begin = xfs_vm_write_begin,
- .write_end = generic_write_end,
- .bmap = xfs_vm_bmap,
- .direct_IO = xfs_vm_direct_IO,
- .migratepage = buffer_migrate_page,
- .is_partially_uptodate = block_is_partially_uptodate,
- };