- /*
- * Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
- #include "xfs.h"
- #include <linux/stddef.h>
- #include <linux/errno.h>
- #include <linux/gfp.h>
- #include <linux/pagemap.h>
- #include <linux/init.h>
- #include <linux/vmalloc.h>
- #include <linux/bio.h>
- #include <linux/sysctl.h>
- #include <linux/proc_fs.h>
- #include <linux/workqueue.h>
- #include <linux/percpu.h>
- #include <linux/blkdev.h>
- #include <linux/hash.h>
- #include <linux/kthread.h>
- #include <linux/migrate.h>
- #include <linux/backing-dev.h>
- #include <linux/freezer.h>
- #include "xfs_sb.h"
- #include "xfs_log.h"
- #include "xfs_ag.h"
- #include "xfs_mount.h"
- #include "xfs_trace.h"
- static kmem_zone_t *xfs_buf_zone;
- static struct workqueue_struct *xfslogd_workqueue;
- #ifdef XFS_BUF_LOCK_TRACKING
- # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
- # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
- # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
- #else
- # define XB_SET_OWNER(bp) do { } while (0)
- # define XB_CLEAR_OWNER(bp) do { } while (0)
- # define XB_GET_OWNER(bp) do { } while (0)
- #endif
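- /*
- * Translate buffer flags into an allocation mask. Read-ahead allocations are
- * speculative, so they use __GFP_NORETRY and are allowed to fail quickly
- * under memory pressure; everything else uses GFP_NOFS to avoid recursing
- * into the filesystem from reclaim. __GFP_NOWARN suppresses allocation
- * failure warnings in both cases.
- */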
- #define xb_to_gfp(flags) \
- ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
- static inline int
- xfs_buf_is_vmapped(
- struct xfs_buf *bp)
- {
- /*
- * Return true if the buffer is vmapped.
- *
- * b_addr is null if the buffer is not mapped, but the code is clever
- * enough to know it doesn't have to map a single page, so the check has
- * to be both for b_addr and bp->b_page_count > 1.
- */
- return bp->b_addr && bp->b_page_count > 1;
- }
- static inline int
- xfs_buf_vmap_len(
- struct xfs_buf *bp)
- {
- return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
- }
- /*
- * xfs_buf_lru_add - add a buffer to the LRU.
- *
- * The LRU takes a new reference to the buffer so that it will only be freed
- * once the shrinker takes the buffer off the LRU.
- */
- STATIC void
- xfs_buf_lru_add(
- struct xfs_buf *bp)
- {
- struct xfs_buftarg *btp = bp->b_target;
- spin_lock(&btp->bt_lru_lock);
- if (list_empty(&bp->b_lru)) {
- atomic_inc(&bp->b_hold);
- list_add_tail(&bp->b_lru, &btp->bt_lru);
- btp->bt_lru_nr++;
- }
- spin_unlock(&btp->bt_lru_lock);
- }
- /*
- * xfs_buf_lru_del - remove a buffer from the LRU
- *
- * The unlocked check is safe here because it only occurs when there are no
- * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is there
- * to optimise the shrinker removing the buffer from the LRU and calling
- * xfs_buf_free(), i.e. it removes an unnecessary round trip on the
- * bt_lru_lock.
- */
- STATIC void
- xfs_buf_lru_del(
- struct xfs_buf *bp)
- {
- struct xfs_buftarg *btp = bp->b_target;
- if (list_empty(&bp->b_lru))
- return;
- spin_lock(&btp->bt_lru_lock);
- if (!list_empty(&bp->b_lru)) {
- list_del_init(&bp->b_lru);
- btp->bt_lru_nr--;
- }
- spin_unlock(&btp->bt_lru_lock);
- }
- /*
- * When we mark a buffer stale, we remove the buffer from the LRU and clear the
- * b_lru_ref count so that the buffer is freed immediately when the buffer
- * reference count falls to zero. If the buffer is already on the LRU, we need
- * to remove the reference that LRU holds on the buffer.
- *
- * This prevents build-up of stale buffers on the LRU.
- */
- void
- xfs_buf_stale(
- struct xfs_buf *bp)
- {
- ASSERT(xfs_buf_islocked(bp));
- bp->b_flags |= XBF_STALE;
- /*
- * Clear the delwri status so that a delwri queue walker will not
- * flush this buffer to disk now that it is stale. The delwri queue has
- * a reference to the buffer, so this is safe to do.
- */
- bp->b_flags &= ~_XBF_DELWRI_Q;
- atomic_set(&(bp)->b_lru_ref, 0);
- if (!list_empty(&bp->b_lru)) {
- struct xfs_buftarg *btp = bp->b_target;
- spin_lock(&btp->bt_lru_lock);
- if (!list_empty(&bp->b_lru)) {
- list_del_init(&bp->b_lru);
- btp->bt_lru_nr--;
- atomic_dec(&bp->b_hold);
- }
- spin_unlock(&btp->bt_lru_lock);
- }
- ASSERT(atomic_read(&bp->b_hold) >= 1);
- }
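- /*
- * Set up the buffer map array. The common single-map case reuses the
- * embedded b_map to avoid an allocation; multi-segment (discontiguous)
- * buffers allocate a separate array.
- */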
- static int
- xfs_buf_get_maps(
- struct xfs_buf *bp,
- int map_count)
- {
- ASSERT(bp->b_maps == NULL);
- bp->b_map_count = map_count;
- if (map_count == 1) {
- bp->b_maps = &bp->b_map;
- return 0;
- }
- bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
- KM_NOFS);
- if (!bp->b_maps)
- return ENOMEM;
- return 0;
- }
- /*
- * Frees b_maps if it was allocated separately from the embedded b_map.
- */
- static void
- xfs_buf_free_maps(
- struct xfs_buf *bp)
- {
- if (bp->b_maps != &bp->b_map) {
- kmem_free(bp->b_maps);
- bp->b_maps = NULL;
- }
- }
- struct xfs_buf *
- _xfs_buf_alloc(
- struct xfs_buftarg *target,
- struct xfs_buf_map *map,
- int nmaps,
- xfs_buf_flags_t flags)
- {
- struct xfs_buf *bp;
- int error;
- int i;
- bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
- if (unlikely(!bp))
- return NULL;
- /*
- * We don't want certain flags to appear in b_flags unless they are
- * specifically set by later operations on the buffer.
- */
- flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
- atomic_set(&bp->b_hold, 1);
- atomic_set(&bp->b_lru_ref, 1);
- init_completion(&bp->b_iowait);
- INIT_LIST_HEAD(&bp->b_lru);
- INIT_LIST_HEAD(&bp->b_list);
- RB_CLEAR_NODE(&bp->b_rbnode);
- sema_init(&bp->b_sema, 0); /* held, no waiters */
- XB_SET_OWNER(bp);
- bp->b_target = target;
- bp->b_flags = flags;
- /*
- * Set length and io_length to the same value initially.
- * I/O routines should use io_length, which will be the same in
- * most cases but may be reset (e.g. XFS recovery).
- */
- error = xfs_buf_get_maps(bp, nmaps);
- if (error) {
- kmem_zone_free(xfs_buf_zone, bp);
- return NULL;
- }
- bp->b_bn = map[0].bm_bn;
- bp->b_length = 0;
- for (i = 0; i < nmaps; i++) {
- bp->b_maps[i].bm_bn = map[i].bm_bn;
- bp->b_maps[i].bm_len = map[i].bm_len;
- bp->b_length += map[i].bm_len;
- }
- bp->b_io_length = bp->b_length;
- atomic_set(&bp->b_pin_count, 0);
- init_waitqueue_head(&bp->b_waiters);
- XFS_STATS_INC(xb_create);
- trace_xfs_buf_init(bp, _RET_IP_);
- return bp;
- }
- /*
- * Allocate a page array capable of holding a specified number
- * of pages, and point the page buf at it.
- */
- STATIC int
- _xfs_buf_get_pages(
- xfs_buf_t *bp,
- int page_count,
- xfs_buf_flags_t flags)
- {
- /* Make sure that we have a page list */
- if (bp->b_pages == NULL) {
- bp->b_page_count = page_count;
- if (page_count <= XB_PAGES) {
- bp->b_pages = bp->b_page_array;
- } else {
- bp->b_pages = kmem_alloc(sizeof(struct page *) *
- page_count, KM_NOFS);
- if (bp->b_pages == NULL)
- return -ENOMEM;
- }
- memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
- }
- return 0;
- }
- /*
- * Frees b_pages if it was allocated.
- */
- STATIC void
- _xfs_buf_free_pages(
- xfs_buf_t *bp)
- {
- if (bp->b_pages != bp->b_page_array) {
- kmem_free(bp->b_pages);
- bp->b_pages = NULL;
- }
- }
- /*
- * Releases the specified buffer.
- *
- * The modification state of any associated pages is left unchanged.
- * The buffer must not be on any hash - use xfs_buf_rele instead for
- * hashed and refcounted buffers
- */
- void
- xfs_buf_free(
- xfs_buf_t *bp)
- {
- trace_xfs_buf_free(bp, _RET_IP_);
- ASSERT(list_empty(&bp->b_lru));
- if (bp->b_flags & _XBF_PAGES) {
- uint i;
- if (xfs_buf_is_vmapped(bp))
- vm_unmap_ram(bp->b_addr - bp->b_offset,
- bp->b_page_count);
- for (i = 0; i < bp->b_page_count; i++) {
- struct page *page = bp->b_pages[i];
- __free_page(page);
- }
- } else if (bp->b_flags & _XBF_KMEM)
- kmem_free(bp->b_addr);
- _xfs_buf_free_pages(bp);
- xfs_buf_free_maps(bp);
- kmem_zone_free(xfs_buf_zone, bp);
- }
- /*
- * Allocates all the pages for the buffer in question and builds its page list.
- */
- STATIC int
- xfs_buf_allocate_memory(
- xfs_buf_t *bp,
- uint flags)
- {
- size_t size;
- size_t nbytes, offset;
- gfp_t gfp_mask = xb_to_gfp(flags);
- unsigned short page_count, i;
- xfs_off_t start, end;
- int error;
- /*
- * for buffers that are contained within a single page, just allocate
- * the memory from the heap - there's no need for the complexity of
- * page arrays to keep allocation down to order 0.
- */
- size = BBTOB(bp->b_length);
- if (size < PAGE_SIZE) {
- bp->b_addr = kmem_alloc(size, KM_NOFS);
- if (!bp->b_addr) {
- /* low memory - use alloc_page loop instead */
- goto use_alloc_page;
- }
- if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
- ((unsigned long)bp->b_addr & PAGE_MASK)) {
- /* b_addr spans two pages - use alloc_page instead */
- kmem_free(bp->b_addr);
- bp->b_addr = NULL;
- goto use_alloc_page;
- }
- bp->b_offset = offset_in_page(bp->b_addr);
- bp->b_pages = bp->b_page_array;
- bp->b_pages[0] = virt_to_page(bp->b_addr);
- bp->b_page_count = 1;
- bp->b_flags |= _XBF_KMEM;
- return 0;
- }
- use_alloc_page:
- start = BBTOB(bp->b_map.bm_bn) >> PAGE_SHIFT;
- end = (BBTOB(bp->b_map.bm_bn + bp->b_length) + PAGE_SIZE - 1)
- >> PAGE_SHIFT;
- page_count = end - start;
- error = _xfs_buf_get_pages(bp, page_count, flags);
- if (unlikely(error))
- return error;
- offset = bp->b_offset;
- bp->b_flags |= _XBF_PAGES;
- for (i = 0; i < bp->b_page_count; i++) {
- struct page *page;
- uint retries = 0;
- retry:
- page = alloc_page(gfp_mask);
- if (unlikely(page == NULL)) {
- if (flags & XBF_READ_AHEAD) {
- bp->b_page_count = i;
- error = ENOMEM;
- goto out_free_pages;
- }
- /*
- * This could deadlock.
- *
- * But until all the XFS lowlevel code is revamped to
- * handle buffer allocation failures we can't do much.
- */
- if (!(++retries % 100))
- xfs_err(NULL,
- "possible memory allocation deadlock in %s (mode:0x%x)",
- __func__, gfp_mask);
- XFS_STATS_INC(xb_page_retries);
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- goto retry;
- }
- XFS_STATS_INC(xb_page_found);
- nbytes = min_t(size_t, size, PAGE_SIZE - offset);
- size -= nbytes;
- bp->b_pages[i] = page;
- offset = 0;
- }
- return 0;
- out_free_pages:
- for (i = 0; i < bp->b_page_count; i++)
- __free_page(bp->b_pages[i]);
- return error;
- }
- /*
- * Map buffer into kernel address-space if necessary.
- */
- STATIC int
- _xfs_buf_map_pages(
- xfs_buf_t *bp,
- uint flags)
- {
- ASSERT(bp->b_flags & _XBF_PAGES);
- if (bp->b_page_count == 1) {
- /* A single page buffer is always mappable */
- bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
- } else if (flags & XBF_UNMAPPED) {
- bp->b_addr = NULL;
- } else {
- int retried = 0;
- do {
- bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
- -1, PAGE_KERNEL);
- if (bp->b_addr)
- break;
- vm_unmap_aliases();
- } while (retried++ <= 1);
- if (!bp->b_addr)
- return -ENOMEM;
- bp->b_addr += bp->b_offset;
- }
- return 0;
- }
- /*
- * Finding and Reading Buffers
- */
- /*
- * Look up, and create if absent, a lockable buffer for a given range of
- * the buffer target. The buffer is returned locked. No I/O is implied by
- * this call.
- */
- xfs_buf_t *
- _xfs_buf_find(
- struct xfs_buftarg *btp,
- struct xfs_buf_map *map,
- int nmaps,
- xfs_buf_flags_t flags,
- xfs_buf_t *new_bp)
- {
- size_t numbytes;
- struct xfs_perag *pag;
- struct rb_node **rbp;
- struct rb_node *parent;
- xfs_buf_t *bp;
- xfs_daddr_t blkno = map[0].bm_bn;
- int numblks = 0;
- int i;
- for (i = 0; i < nmaps; i++)
- numblks += map[i].bm_len;
- numbytes = BBTOB(numblks);
- /* Check for IOs smaller than the sector size / not sector aligned */
- ASSERT(!(numbytes < (1 << btp->bt_sshift)));
- ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
- /* get tree root */
- pag = xfs_perag_get(btp->bt_mount,
- xfs_daddr_to_agno(btp->bt_mount, blkno));
- /* walk tree */
- spin_lock(&pag->pag_buf_lock);
- rbp = &pag->pag_buf_tree.rb_node;
- parent = NULL;
- bp = NULL;
- while (*rbp) {
- parent = *rbp;
- bp = rb_entry(parent, struct xfs_buf, b_rbnode);
- if (blkno < bp->b_bn)
- rbp = &(*rbp)->rb_left;
- else if (blkno > bp->b_bn)
- rbp = &(*rbp)->rb_right;
- else {
- /*
- * found a block number match. If the range doesn't
- * match, the only way this is allowed is if the buffer
- * in the cache is stale and the transaction that made
- * it stale has not yet committed. i.e. we are
- * reallocating a busy extent. Skip this buffer and
- * continue searching to the right for an exact match.
- */
- if (bp->b_length != numblks) {
- ASSERT(bp->b_flags & XBF_STALE);
- rbp = &(*rbp)->rb_right;
- continue;
- }
- atomic_inc(&bp->b_hold);
- goto found;
- }
- }
- /* No match found */
- if (new_bp) {
- rb_link_node(&new_bp->b_rbnode, parent, rbp);
- rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
- /* the buffer keeps the perag reference until it is freed */
- new_bp->b_pag = pag;
- spin_unlock(&pag->pag_buf_lock);
- } else {
- XFS_STATS_INC(xb_miss_locked);
- spin_unlock(&pag->pag_buf_lock);
- xfs_perag_put(pag);
- }
- return new_bp;
- found:
- spin_unlock(&pag->pag_buf_lock);
- xfs_perag_put(pag);
- if (!xfs_buf_trylock(bp)) {
- if (flags & XBF_TRYLOCK) {
- xfs_buf_rele(bp);
- XFS_STATS_INC(xb_busy_locked);
- return NULL;
- }
- xfs_buf_lock(bp);
- XFS_STATS_INC(xb_get_locked_waited);
- }
- /*
- * if the buffer is stale, clear all the external state associated with
- * it. We need to keep flags such as how we allocated the buffer memory
- * intact here.
- */
- if (bp->b_flags & XBF_STALE) {
- ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
- bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
- }
- trace_xfs_buf_find(bp, flags, _RET_IP_);
- XFS_STATS_INC(xb_get_locked);
- return bp;
- }
- /*
- * Assembles a buffer covering the specified range. The code is optimised for
- * cache hits, as metadata intensive workloads will see 3 orders of magnitude
- * more hits than misses.
- */
- struct xfs_buf *
- xfs_buf_get_map(
- struct xfs_buftarg *target,
- struct xfs_buf_map *map,
- int nmaps,
- xfs_buf_flags_t flags)
- {
- struct xfs_buf *bp;
- struct xfs_buf *new_bp;
- int error = 0;
- bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
- if (likely(bp))
- goto found;
- new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
- if (unlikely(!new_bp))
- return NULL;
- error = xfs_buf_allocate_memory(new_bp, flags);
- if (error) {
- xfs_buf_free(new_bp);
- return NULL;
- }
- bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
- if (!bp) {
- xfs_buf_free(new_bp);
- return NULL;
- }
- if (bp != new_bp)
- xfs_buf_free(new_bp);
- found:
- if (!bp->b_addr) {
- error = _xfs_buf_map_pages(bp, flags);
- if (unlikely(error)) {
- xfs_warn(target->bt_mount,
- "%s: failed to map pages\n", __func__);
- xfs_buf_relse(bp);
- return NULL;
- }
- }
- XFS_STATS_INC(xb_get);
- trace_xfs_buf_get(bp, flags, _RET_IP_);
- return bp;
- }
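- /*
- * Issue the read described by @flags on an already assembled buffer. For
- * synchronous reads we wait for completion and return the I/O error, if any.
- */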
- STATIC int
- _xfs_buf_read(
- xfs_buf_t *bp,
- xfs_buf_flags_t flags)
- {
- ASSERT(!(flags & XBF_WRITE));
- ASSERT(bp->b_map.bm_bn != XFS_BUF_DADDR_NULL);
- bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
- bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
- xfs_buf_iorequest(bp);
- if (flags & XBF_ASYNC)
- return 0;
- return xfs_buf_iowait(bp);
- }
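- /*
- * Read a (possibly multi-segment) buffer, issuing I/O only if the cached
- * copy is not already up to date. A typical single-extent call looks roughly
- * like this (illustrative only; real callers pass their own target, block
- * number and length):
- *
- *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
- *	bp = xfs_buf_read_map(mp->m_ddev_targp, &map, 1, 0);
- */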
- xfs_buf_t *
- xfs_buf_read_map(
- struct xfs_buftarg *target,
- struct xfs_buf_map *map,
- int nmaps,
- xfs_buf_flags_t flags)
- {
- struct xfs_buf *bp;
- flags |= XBF_READ;
- bp = xfs_buf_get_map(target, map, nmaps, flags);
- if (bp) {
- trace_xfs_buf_read(bp, flags, _RET_IP_);
- if (!XFS_BUF_ISDONE(bp)) {
- XFS_STATS_INC(xb_get_read);
- _xfs_buf_read(bp, flags);
- } else if (flags & XBF_ASYNC) {
- /*
- * Read ahead call which is already satisfied,
- * drop the buffer
- */
- xfs_buf_relse(bp);
- return NULL;
- } else {
- /* We do not want read in the flags */
- bp->b_flags &= ~XBF_READ;
- }
- }
- return bp;
- }
- /*
- * If we are not low on memory then do the readahead in a deadlock
- * safe manner.
- */
- void
- xfs_buf_readahead_map(
- struct xfs_buftarg *target,
- struct xfs_buf_map *map,
- int nmaps)
- {
- if (bdi_read_congested(target->bt_bdi))
- return;
- xfs_buf_read_map(target, map, nmaps,
- XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
- }
- /*
- * Read an uncached buffer from disk. Allocates and returns a locked
- * buffer containing the disk contents or nothing.
- */
- struct xfs_buf *
- xfs_buf_read_uncached(
- struct xfs_buftarg *target,
- xfs_daddr_t daddr,
- size_t numblks,
- int flags)
- {
- xfs_buf_t *bp;
- int error;
- bp = xfs_buf_get_uncached(target, numblks, flags);
- if (!bp)
- return NULL;
- /* set up the buffer for a read IO */
- ASSERT(bp->b_map_count == 1);
- bp->b_bn = daddr;
- bp->b_maps[0].bm_bn = daddr;
- bp->b_flags |= XBF_READ;
- xfsbdstrat(target->bt_mount, bp);
- error = xfs_buf_iowait(bp);
- if (error) {
- xfs_buf_relse(bp);
- return NULL;
- }
- return bp;
- }
- /*
- * Return a buffer allocated as an empty buffer and associated with external
- * memory via xfs_buf_associate_memory() back to its empty state.
- */
- void
- xfs_buf_set_empty(
- struct xfs_buf *bp,
- size_t numblks)
- {
- if (bp->b_pages)
- _xfs_buf_free_pages(bp);
- bp->b_pages = NULL;
- bp->b_page_count = 0;
- bp->b_addr = NULL;
- bp->b_length = numblks;
- bp->b_io_length = numblks;
- ASSERT(bp->b_map_count == 1);
- bp->b_bn = XFS_BUF_DADDR_NULL;
- bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
- bp->b_maps[0].bm_len = bp->b_length;
- }
- static inline struct page *
- mem_to_page(
- void *addr)
- {
- if ((!is_vmalloc_addr(addr))) {
- return virt_to_page(addr);
- } else {
- return vmalloc_to_page(addr);
- }
- }
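- /*
- * Attach caller-provided memory to a buffer instead of pages allocated by
- * the buffer cache. The page list is rebuilt to point at the pages backing
- * @mem so the normal I/O paths can operate on it.
- */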
- int
- xfs_buf_associate_memory(
- xfs_buf_t *bp,
- void *mem,
- size_t len)
- {
- int rval;
- int i = 0;
- unsigned long pageaddr;
- unsigned long offset;
- size_t buflen;
- int page_count;
- pageaddr = (unsigned long)mem & PAGE_MASK;
- offset = (unsigned long)mem - pageaddr;
- buflen = PAGE_ALIGN(len + offset);
- page_count = buflen >> PAGE_SHIFT;
- /* Free any previous set of page pointers */
- if (bp->b_pages)
- _xfs_buf_free_pages(bp);
- bp->b_pages = NULL;
- bp->b_addr = mem;
- rval = _xfs_buf_get_pages(bp, page_count, 0);
- if (rval)
- return rval;
- bp->b_offset = offset;
- for (i = 0; i < bp->b_page_count; i++) {
- bp->b_pages[i] = mem_to_page((void *)pageaddr);
- pageaddr += PAGE_SIZE;
- }
- bp->b_io_length = BTOBB(len);
- bp->b_length = BTOBB(buflen);
- return 0;
- }
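- /*
- * Allocate a fully set up buffer that is not inserted into the buffer cache.
- * The buffer has no disk address until the caller assigns one (see
- * xfs_buf_read_uncached() above for an example).
- */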
- xfs_buf_t *
- xfs_buf_get_uncached(
- struct xfs_buftarg *target,
- size_t numblks,
- int flags)
- {
- unsigned long page_count;
- int error, i;
- struct xfs_buf *bp;
- DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
- bp = _xfs_buf_alloc(target, &map, 1, 0);
- if (unlikely(bp == NULL))
- goto fail;
- page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
- error = _xfs_buf_get_pages(bp, page_count, 0);
- if (error)
- goto fail_free_buf;
- for (i = 0; i < page_count; i++) {
- bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
- if (!bp->b_pages[i])
- goto fail_free_mem;
- }
- bp->b_flags |= _XBF_PAGES;
- error = _xfs_buf_map_pages(bp, 0);
- if (unlikely(error)) {
- xfs_warn(target->bt_mount,
- "%s: failed to map pages\n", __func__);
- goto fail_free_mem;
- }
- trace_xfs_buf_get_uncached(bp, _RET_IP_);
- return bp;
- fail_free_mem:
- while (--i >= 0)
- __free_page(bp->b_pages[i]);
- _xfs_buf_free_pages(bp);
- fail_free_buf:
- xfs_buf_free_maps(bp);
- kmem_zone_free(xfs_buf_zone, bp);
- fail:
- return NULL;
- }
- /*
- * Increment reference count on buffer, to hold the buffer concurrently
- * with another thread which may release (free) the buffer asynchronously.
- * Must hold the buffer already to call this function.
- */
- void
- xfs_buf_hold(
- xfs_buf_t *bp)
- {
- trace_xfs_buf_hold(bp, _RET_IP_);
- atomic_inc(&bp->b_hold);
- }
- /*
- * Releases a hold on the specified buffer. If the
- * hold count is 1, calls xfs_buf_free.
- */
- void
- xfs_buf_rele(
- xfs_buf_t *bp)
- {
- struct xfs_perag *pag = bp->b_pag;
- trace_xfs_buf_rele(bp, _RET_IP_);
- if (!pag) {
- ASSERT(list_empty(&bp->b_lru));
- ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
- if (atomic_dec_and_test(&bp->b_hold))
- xfs_buf_free(bp);
- return;
- }
- ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
- ASSERT(atomic_read(&bp->b_hold) > 0);
- if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
- if (!(bp->b_flags & XBF_STALE) &&
- atomic_read(&bp->b_lru_ref)) {
- xfs_buf_lru_add(bp);
- spin_unlock(&pag->pag_buf_lock);
- } else {
- xfs_buf_lru_del(bp);
- ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
- rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
- spin_unlock(&pag->pag_buf_lock);
- xfs_perag_put(pag);
- xfs_buf_free(bp);
- }
- }
- }
- /*
- * Lock a buffer object, if it is not already locked.
- *
- * If we come across a stale, pinned, locked buffer, we know that we are
- * being asked to lock a buffer that has been reallocated. Because it is
- * pinned, we know that the log has not been pushed to disk and hence it
- * will still be locked. Rather than continuing to have trylock attempts
- * fail until someone else pushes the log, push it ourselves before
- * returning. This means that the xfsaild will not get stuck trying
- * to push on stale inode buffers.
- */
- int
- xfs_buf_trylock(
- struct xfs_buf *bp)
- {
- int locked;
- locked = down_trylock(&bp->b_sema) == 0;
- if (locked)
- XB_SET_OWNER(bp);
- else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
- xfs_log_force(bp->b_target->bt_mount, 0);
- trace_xfs_buf_trylock(bp, _RET_IP_);
- return locked;
- }
- /*
- * Lock a buffer object.
- *
- * If we come across a stale, pinned, locked buffer, we know that we
- * are being asked to lock a buffer that has been reallocated. Because
- * it is pinned, we know that the log has not been pushed to disk and
- * hence it will still be locked. Rather than sleeping until someone
- * else pushes the log, push it ourselves before trying to get the lock.
- */
- void
- xfs_buf_lock(
- struct xfs_buf *bp)
- {
- trace_xfs_buf_lock(bp, _RET_IP_);
- if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
- xfs_log_force(bp->b_target->bt_mount, 0);
- down(&bp->b_sema);
- XB_SET_OWNER(bp);
- trace_xfs_buf_lock_done(bp, _RET_IP_);
- }
- void
- xfs_buf_unlock(
- struct xfs_buf *bp)
- {
- XB_CLEAR_OWNER(bp);
- up(&bp->b_sema);
- trace_xfs_buf_unlock(bp, _RET_IP_);
- }
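- /*
- * Wait for the buffer's pin count to drop to zero before issuing a write.
- * Waiters sleep on b_waiters and are woken when the buffer is unpinned.
- */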
- STATIC void
- xfs_buf_wait_unpin(
- xfs_buf_t *bp)
- {
- DECLARE_WAITQUEUE (wait, current);
- if (atomic_read(&bp->b_pin_count) == 0)
- return;
- add_wait_queue(&bp->b_waiters, &wait);
- for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (atomic_read(&bp->b_pin_count) == 0)
- break;
- io_schedule();
- }
- remove_wait_queue(&bp->b_waiters, &wait);
- set_current_state(TASK_RUNNING);
- }
- /*
- * Buffer Utility Routines
- */
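- /*
- * Deferred I/O completion handler, run from workqueue context: invoke the
- * buffer's b_iodone callback if one is set, otherwise release async buffers.
- */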
- STATIC void
- xfs_buf_iodone_work(
- struct work_struct *work)
- {
- xfs_buf_t *bp =
- container_of(work, xfs_buf_t, b_iodone_work);
- if (bp->b_iodone)
- (*(bp->b_iodone))(bp);
- else if (bp->b_flags & XBF_ASYNC)
- xfs_buf_relse(bp);
- }
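- /*
- * Mark the buffer I/O as complete. If a completion callback is attached or
- * the buffer is async, run the completion work either inline or, when
- * @schedule is set, deferred to the xfslogd workqueue; otherwise wake anyone
- * sleeping in xfs_buf_iowait().
- */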
- void
- xfs_buf_ioend(
- xfs_buf_t *bp,
- int schedule)
- {
- trace_xfs_buf_iodone(bp, _RET_IP_);
- bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
- if (bp->b_error == 0)
- bp->b_flags |= XBF_DONE;
- if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
- if (schedule) {
- INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
- queue_work(xfslogd_workqueue, &bp->b_iodone_work);
- } else {
- xfs_buf_iodone_work(&bp->b_iodone_work);
- }
- } else {
- complete(&bp->b_iowait);
- }
- }
- void
- xfs_buf_ioerror(
- xfs_buf_t *bp,
- int error)
- {
- ASSERT(error >= 0 && error <= 0xffff);
- bp->b_error = (unsigned short)error;
- trace_xfs_buf_ioerror(bp, error, _RET_IP_);
- }
- void
- xfs_buf_ioerror_alert(
- struct xfs_buf *bp,
- const char *func)
- {
- xfs_alert(bp->b_target->bt_mount,
- "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
- (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
- }
- /*
- * Called when we want to stop a buffer from getting written or read.
- * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
- * so that the proper iodone callbacks get called.
- */
- STATIC int
- xfs_bioerror(
- xfs_buf_t *bp)
- {
- #ifdef XFSERRORDEBUG
- ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
- #endif
- /*
- * No need to wait until the buffer is unpinned, we aren't flushing it.
- */
- xfs_buf_ioerror(bp, EIO);
- /*
- * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
- */
- XFS_BUF_UNREAD(bp);
- XFS_BUF_UNDONE(bp);
- xfs_buf_stale(bp);
- xfs_buf_ioend(bp, 0);
- return EIO;
- }
- /*
- * Same as xfs_bioerror, except that we are releasing the buffer
- * here ourselves, and avoiding the xfs_buf_ioend call.
- * This is meant for userdata errors; metadata bufs come with
- * iodone functions attached, so that we can track down errors.
- */
- STATIC int
- xfs_bioerror_relse(
- struct xfs_buf *bp)
- {
- int64_t fl = bp->b_flags;
- /*
- * No need to wait until the buffer is unpinned.
- * We aren't flushing it.
- *
- * chunkhold expects B_DONE to be set, whether
- * we actually finish the I/O or not. We don't want to
- * change that interface.
- */
- XFS_BUF_UNREAD(bp);
- XFS_BUF_DONE(bp);
- xfs_buf_stale(bp);
- bp->b_iodone = NULL;
- if (!(fl & XBF_ASYNC)) {
- /*
- * Mark b_error and B_ERROR _both_.
- * Lots of chunkcache code assumes that.
- * There's no reason to mark error for
- * ASYNC buffers.
- */
- xfs_buf_ioerror(bp, EIO);
- complete(&bp->b_iowait);
- } else {
- xfs_buf_relse(bp);
- }
- return EIO;
- }
- STATIC int
- xfs_bdstrat_cb(
- struct xfs_buf *bp)
- {
- if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
- trace_xfs_bdstrat_shut(bp, _RET_IP_);
- /*
- * Metadata write that didn't get logged but
- * written delayed anyway. These aren't associated
- * with a transaction, and can be ignored.
- */
- if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
- return xfs_bioerror_relse(bp);
- else
- return xfs_bioerror(bp);
- }
- xfs_buf_iorequest(bp);
- return 0;
- }
- int
- xfs_bwrite(
- struct xfs_buf *bp)
- {
- int error;
- ASSERT(xfs_buf_islocked(bp));
- bp->b_flags |= XBF_WRITE;
- bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
- xfs_bdstrat_cb(bp);
- error = xfs_buf_iowait(bp);
- if (error) {
- xfs_force_shutdown(bp->b_target->bt_mount,
- SHUTDOWN_META_IO_ERROR);
- }
- return error;
- }
- /*
- * Wrapper around bdstrat so that we can stop data from going to disk in case
- * we are shutting down the filesystem. Typically user data goes through this
- * path; one of the exceptions is the superblock.
- */
- void
- xfsbdstrat(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
- {
- if (XFS_FORCED_SHUTDOWN(mp)) {
- trace_xfs_bdstrat_shut(bp, _RET_IP_);
- xfs_bioerror_relse(bp);
- return;
- }
- xfs_buf_iorequest(bp);
- }
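- /*
- * Drop one reference on b_io_remaining; completion processing only runs once
- * the last reference (including the one taken in xfs_buf_iorequest()) goes
- * away.
- */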
- STATIC void
- _xfs_buf_ioend(
- xfs_buf_t *bp,
- int schedule)
- {
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
- xfs_buf_ioend(bp, schedule);
- }
- STATIC void
- xfs_buf_bio_end_io(
- struct bio *bio,
- int error)
- {
- xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
- xfs_buf_ioerror(bp, -error);
- if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
- invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
- _xfs_buf_ioend(bp, 1);
- bio_put(bio);
- }
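- /*
- * Build and submit bios for a single map vector. Large vectors may need more
- * than one bio; each chunk bumps b_io_remaining so overall completion is only
- * signalled once every bio has finished.
- */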
- static void
- xfs_buf_ioapply_map(
- struct xfs_buf *bp,
- int map,
- int *buf_offset,
- int *count,
- int rw)
- {
- int page_index;
- int total_nr_pages = bp->b_page_count;
- int nr_pages;
- struct bio *bio;
- sector_t sector = bp->b_maps[map].bm_bn;
- int size;
- int offset;
- total_nr_pages = bp->b_page_count;
- /* skip the pages in the buffer before the start offset */
- page_index = 0;
- offset = *buf_offset;
- while (offset >= PAGE_SIZE) {
- page_index++;
- offset -= PAGE_SIZE;
- }
- /*
- * Limit the IO size to the length of the current vector, and update the
- * remaining IO count for the next time around.
- */
- size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
- *count -= size;
- *buf_offset += size;
- next_chunk:
- atomic_inc(&bp->b_io_remaining);
- nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
- if (nr_pages > total_nr_pages)
- nr_pages = total_nr_pages;
- bio = bio_alloc(GFP_NOIO, nr_pages);
- bio->bi_bdev = bp->b_target->bt_bdev;
- bio->bi_sector = sector;
- bio->bi_end_io = xfs_buf_bio_end_io;
- bio->bi_private = bp;
- for (; size && nr_pages; nr_pages--, page_index++) {
- int rbytes, nbytes = PAGE_SIZE - offset;
- if (nbytes > size)
- nbytes = size;
- rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
- offset);
- if (rbytes < nbytes)
- break;
- offset = 0;
- sector += BTOBB(nbytes);
- size -= nbytes;
- total_nr_pages--;
- }
- if (likely(bio->bi_size)) {
- if (xfs_buf_is_vmapped(bp)) {
- flush_kernel_vmap_range(bp->b_addr,
- xfs_buf_vmap_len(bp));
- }
- submit_bio(rw, bio);
- if (size)
- goto next_chunk;
- } else {
- xfs_buf_ioerror(bp, EIO);
- bio_put(bio);
- }
- }
- STATIC void
- _xfs_buf_ioapply(
- struct xfs_buf *bp)
- {
- struct blk_plug plug;
- int rw;
- int offset;
- int size;
- int i;
- if (bp->b_flags & XBF_WRITE) {
- if (bp->b_flags & XBF_SYNCIO)
- rw = WRITE_SYNC;
- else
- rw = WRITE;
- if (bp->b_flags & XBF_FUA)
- rw |= REQ_FUA;
- if (bp->b_flags & XBF_FLUSH)
- rw |= REQ_FLUSH;
- } else if (bp->b_flags & XBF_READ_AHEAD) {
- rw = READA;
- } else {
- rw = READ;
- }
- /* we only use the buffer cache for meta-data */
- rw |= REQ_META;
- /*
- * Walk all the vectors issuing IO on them. Set up the initial offset
- * into the buffer and the desired IO size before we start -
- * xfs_buf_ioapply_map() will modify them appropriately for each
- * subsequent call.
- */
- offset = bp->b_offset;
- size = BBTOB(bp->b_io_length);
- blk_start_plug(&plug);
- for (i = 0; i < bp->b_map_count; i++) {
- xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
- if (bp->b_error)
- break;
- if (size <= 0)
- break; /* all done */
- }
- blk_finish_plug(&plug);
- }
- void
- xfs_buf_iorequest(
- xfs_buf_t *bp)
- {
- trace_xfs_buf_iorequest(bp, _RET_IP_);
- ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
- if (bp->b_flags & XBF_WRITE)
- xfs_buf_wait_unpin(bp);
- xfs_buf_hold(bp);
- /* Set the count to 1 initially; this will stop an I/O
- * completion callout which happens before we have started
- * all the I/O from calling xfs_buf_ioend too early.
- */
- atomic_set(&bp->b_io_remaining, 1);
- _xfs_buf_ioapply(bp);
- _xfs_buf_ioend(bp, 1);
- xfs_buf_rele(bp);
- }
- /*
- * Waits for I/O to complete on the buffer supplied. It returns immediately if
- * no I/O is pending or there is already a pending error on the buffer. It
- * returns the I/O error code, if any, or 0 if there was no error.
- */
- int
- xfs_buf_iowait(
- xfs_buf_t *bp)
- {
- trace_xfs_buf_iowait(bp, _RET_IP_);
- if (!bp->b_error)
- wait_for_completion(&bp->b_iowait);
- trace_xfs_buf_iowait_done(bp, _RET_IP_);
- return bp->b_error;
- }
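- /*
- * Return a kernel address for the given byte offset into the buffer, walking
- * the page array when the buffer is not contiguously mapped.
- */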
- xfs_caddr_t
- xfs_buf_offset(
- xfs_buf_t *bp,
- size_t offset)
- {
- struct page *page;
- if (bp->b_addr)
- return bp->b_addr + offset;
- offset += bp->b_offset;
- page = bp->b_pages[offset >> PAGE_SHIFT];
- return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
- }
- /*
- * Move data into or out of a buffer.
- */
- void
- xfs_buf_iomove(
- xfs_buf_t *bp, /* buffer to process */
- size_t boff, /* starting buffer offset */
- size_t bsize, /* length to copy */
- void *data, /* data address */
- xfs_buf_rw_t mode) /* read/write/zero flag */
- {
- size_t bend;
- bend = boff + bsize;
- while (boff < bend) {
- struct page *page;
- int page_index, page_offset, csize;
- page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
- page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
- page = bp->b_pages[page_index];
- csize = min_t(size_t, PAGE_SIZE - page_offset,
- BBTOB(bp->b_io_length) - boff);
- ASSERT((csize + page_offset) <= PAGE_SIZE);
- switch (mode) {
- case XBRW_ZERO:
- memset(page_address(page) + page_offset, 0, csize);
- break;
- case XBRW_READ:
- memcpy(data, page_address(page) + page_offset, csize);
- break;
- case XBRW_WRITE:
- memcpy(page_address(page) + page_offset, data, csize);
- }
- boff += csize;
- data += csize;
- }
- }
- /*
- * Handling of buffer targets (buftargs).
- */
- /*
- * Wait for any bufs with callbacks that have been submitted but have not yet
- * returned. These buffers will have an elevated hold count, so wait on those
- * while freeing all the buffers only held by the LRU.
- */
- void
- xfs_wait_buftarg(
- struct xfs_buftarg *btp)
- {
- struct xfs_buf *bp;
- restart:
- spin_lock(&btp->bt_lru_lock);
- while (!list_empty(&btp->bt_lru)) {
- bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
- if (atomic_read(&bp->b_hold) > 1) {
- spin_unlock(&btp->bt_lru_lock);
- delay(100);
- goto restart;
- }
- /*
- * clear the LRU reference count so the buffer doesn't get
- * ignored in xfs_buf_rele().
- */
- atomic_set(&bp->b_lru_ref, 0);
- spin_unlock(&btp->bt_lru_lock);
- xfs_buf_rele(bp);
- spin_lock(&btp->bt_lru_lock);
- }
- spin_unlock(&btp->bt_lru_lock);
- }
- int
- xfs_buftarg_shrink(
- struct shrinker *shrink,
- struct shrink_control *sc)
- {
- struct xfs_buftarg *btp = container_of(shrink,
- struct xfs_buftarg, bt_shrinker);
- struct xfs_buf *bp;
- int nr_to_scan = sc->nr_to_scan;
- LIST_HEAD(dispose);
- if (!nr_to_scan)
- return btp->bt_lru_nr;
- spin_lock(&btp->bt_lru_lock);
- while (!list_empty(&btp->bt_lru)) {
- if (nr_to_scan-- <= 0)
- break;
- bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
- /*
- * Decrement the b_lru_ref count unless the value is already
- * zero. If the value is already zero, we need to reclaim the
- * buffer, otherwise it gets another trip through the LRU.
- */
- if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
- list_move_tail(&bp->b_lru, &btp->bt_lru);
- continue;
- }
- /*
- * remove the buffer from the LRU now to avoid needing another
- * lock round trip inside xfs_buf_rele().
- */
- list_move(&bp->b_lru, &dispose);
- btp->bt_lru_nr--;
- }
- spin_unlock(&btp->bt_lru_lock);
- while (!list_empty(&dispose)) {
- bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
- list_del_init(&bp->b_lru);
- xfs_buf_rele(bp);
- }
- return btp->bt_lru_nr;
- }
- void
- xfs_free_buftarg(
- struct xfs_mount *mp,
- struct xfs_buftarg *btp)
- {
- unregister_shrinker(&btp->bt_shrinker);
- if (mp->m_flags & XFS_MOUNT_BARRIER)
- xfs_blkdev_issue_flush(btp);
- kmem_free(btp);
- }
- STATIC int
- xfs_setsize_buftarg_flags(
- xfs_buftarg_t *btp,
- unsigned int blocksize,
- unsigned int sectorsize,
- int verbose)
- {
- btp->bt_bsize = blocksize;
- btp->bt_sshift = ffs(sectorsize) - 1;
- btp->bt_smask = sectorsize - 1;
- if (set_blocksize(btp->bt_bdev, sectorsize)) {
- char name[BDEVNAME_SIZE];
- bdevname(btp->bt_bdev, name);
- xfs_warn(btp->bt_mount,
- "Cannot set_blocksize to %u on device %s\n",
- sectorsize, name);
- return EINVAL;
- }
- return 0;
- }
- /*
- * When allocating the initial buffer target we have not yet read in the
- * superblock, so we don't know what sector size is being used at this
- * early stage. Play safe.
- */
- STATIC int
- xfs_setsize_buftarg_early(
- xfs_buftarg_t *btp,
- struct block_device *bdev)
- {
- return xfs_setsize_buftarg_flags(btp,
- PAGE_SIZE, bdev_logical_block_size(bdev), 0);
- }
- int
- xfs_setsize_buftarg(
- xfs_buftarg_t *btp,
- unsigned int blocksize,
- unsigned int sectorsize)
- {
- return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
- }
- xfs_buftarg_t *
- xfs_alloc_buftarg(
- struct xfs_mount *mp,
- struct block_device *bdev,
- int external,
- const char *fsname)
- {
- xfs_buftarg_t *btp;
- btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
- btp->bt_mount = mp;
- btp->bt_dev = bdev->bd_dev;
- btp->bt_bdev = bdev;
- btp->bt_bdi = blk_get_backing_dev_info(bdev);
- if (!btp->bt_bdi)
- goto error;
- INIT_LIST_HEAD(&btp->bt_lru);
- spin_lock_init(&btp->bt_lru_lock);
- if (xfs_setsize_buftarg_early(btp, bdev))
- goto error;
- btp->bt_shrinker.shrink = xfs_buftarg_shrink;
- btp->bt_shrinker.seeks = DEFAULT_SEEKS;
- register_shrinker(&btp->bt_shrinker);
- return btp;
- error:
- kmem_free(btp);
- return NULL;
- }
- /*
- * Add a buffer to the delayed write list.
- *
- * This queues a buffer for writeout if it hasn't already been. Note that
- * neither this routine nor the buffer list submission functions perform
- * any internal synchronization. It is expected that the lists are thread-local
- * to the callers.
- *
- * Returns true if we queued up the buffer, or false if it already had
- * been on the buffer list.
- */
- bool
- xfs_buf_delwri_queue(
- struct xfs_buf *bp,
- struct list_head *list)
- {
- ASSERT(xfs_buf_islocked(bp));
- ASSERT(!(bp->b_flags & XBF_READ));
- /*
- * If the buffer is already marked delwri it has already been queued up
- * by someone else for immediate writeout. Just ignore it in that
- * case.
- */
- if (bp->b_flags & _XBF_DELWRI_Q) {
- trace_xfs_buf_delwri_queued(bp, _RET_IP_);
- return false;
- }
- trace_xfs_buf_delwri_queue(bp, _RET_IP_);
- /*
- * If a buffer gets written out synchronously or marked stale while it
- * is on a delwri list we lazily remove it. To do this, the other party
- * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
- * It remains referenced and on the list. In a rare corner case it
- * might get re-added to a delwri list after the synchronous writeout, in
- * which case we just need to re-add the flag here.
- */
- bp->b_flags |= _XBF_DELWRI_Q;
- if (list_empty(&bp->b_list)) {
- atomic_inc(&bp->b_hold);
- list_add_tail(&bp->b_list, list);
- }
- return true;
- }
- /*
- * Compare function is more complex than it needs to be because
- * the return value is only 32 bits and we are doing comparisons
- * on 64 bit values
- */
- static int
- xfs_buf_cmp(
- void *priv,
- struct list_head *a,
- struct list_head *b)
- {
- struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
- struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
- xfs_daddr_t diff;
- diff = ap->b_map.bm_bn - bp->b_map.bm_bn;
- if (diff < 0)
- return -1;
- if (diff > 0)
- return 1;
- return 0;
- }
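- /*
- * Common helper for the delwri submission paths: lock (or, in the nowait
- * case, trylock and skip pinned) buffers, drop any that have lost their
- * _XBF_DELWRI_Q flag, then sort the rest by disk address and issue the
- * writes under a plug.
- */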
- static int
- __xfs_buf_delwri_submit(
- struct list_head *buffer_list,
- struct list_head *io_list,
- bool wait)
- {
- struct blk_plug plug;
- struct xfs_buf *bp, *n;
- int pinned = 0;
- list_for_each_entry_safe(bp, n, buffer_list, b_list) {
- if (!wait) {
- if (xfs_buf_ispinned(bp)) {
- pinned++;
- continue;
- }
- if (!xfs_buf_trylock(bp))
- continue;
- } else {
- xfs_buf_lock(bp);
- }
- /*
- * Someone else might have written the buffer synchronously or
- * marked it stale in the meantime. In that case only the
- * _XBF_DELWRI_Q flag got cleared, and we have to drop the
- * reference and remove it from the list here.
- */
- if (!(bp->b_flags & _XBF_DELWRI_Q)) {
- list_del_init(&bp->b_list);
- xfs_buf_relse(bp);
- continue;
- }
- list_move_tail(&bp->b_list, io_list);
- trace_xfs_buf_delwri_split(bp, _RET_IP_);
- }
- list_sort(NULL, io_list, xfs_buf_cmp);
- blk_start_plug(&plug);
- list_for_each_entry_safe(bp, n, io_list, b_list) {
- bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
- bp->b_flags |= XBF_WRITE;
- if (!wait) {
- bp->b_flags |= XBF_ASYNC;
- list_del_init(&bp->b_list);
- }
- xfs_bdstrat_cb(bp);
- }
- blk_finish_plug(&plug);
- return pinned;
- }
- /*
- * Write out a buffer list asynchronously.
- *
- * This will take the @buffer_list, write all non-locked and non-pinned buffers
- * out and not wait for I/O completion on any of the buffers. This interface
- * is only safely usable for callers that can track I/O completion by higher
- * level means, e.g. AIL pushing as the @buffer_list is consumed in this
- * function.
- */
- int
- xfs_buf_delwri_submit_nowait(
- struct list_head *buffer_list)
- {
- LIST_HEAD (io_list);
- return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
- }
- /*
- * Write out a buffer list synchronously.
- *
- * This will take the @buffer_list, write all buffers out and wait for I/O
- * completion on all of the buffers. @buffer_list is consumed by the function,
- * so callers must have some other way of tracking buffers if they require such
- * functionality.
- */
- int
- xfs_buf_delwri_submit(
- struct list_head *buffer_list)
- {
- LIST_HEAD (io_list);
- int error = 0, error2;
- struct xfs_buf *bp;
- __xfs_buf_delwri_submit(buffer_list, &io_list, true);
- /* Wait for IO to complete. */
- while (!list_empty(&io_list)) {
- bp = list_first_entry(&io_list, struct xfs_buf, b_list);
- list_del_init(&bp->b_list);
- error2 = xfs_buf_iowait(bp);
- xfs_buf_relse(bp);
- if (!error)
- error = error2;
- }
- return error;
- }
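- /*
- * Initialise the buffer cache: create the xfs_buf zone and the xfslogd
- * workqueue used for deferred I/O completion.
- */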
- int __init
- xfs_buf_init(void)
- {
- xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
- KM_ZONE_HWALIGN, NULL);
- if (!xfs_buf_zone)
- goto out;
- xfslogd_workqueue = alloc_workqueue("xfslogd",
- WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
- if (!xfslogd_workqueue)
- goto out_free_buf_zone;
- return 0;
- out_free_buf_zone:
- kmem_zone_destroy(xfs_buf_zone);
- out:
- return -ENOMEM;
- }
- void
- xfs_buf_terminate(void)
- {
- destroy_workqueue(xfslogd_workqueue);
- kmem_zone_destroy(xfs_buf_zone);
- }