- #include <linux/bitops.h>
- #include <linux/slab.h>
- #include <linux/bio.h>
- #include <linux/mm.h>
- #include <linux/gfp.h>
- #include <linux/pagemap.h>
- #include <linux/page-flags.h>
- #include <linux/module.h>
- #include <linux/spinlock.h>
- #include <linux/blkdev.h>
- #include "extent_map.h"
- /* temporary define until extent_map moves out of btrfs */
- struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
- unsigned long extra_flags,
- void (*ctor)(void *, struct kmem_cache *,
- unsigned long));
- static struct kmem_cache *extent_map_cache;
- static struct kmem_cache *extent_state_cache;
- static struct kmem_cache *extent_buffer_cache;
- static LIST_HEAD(extent_buffers);
- static LIST_HEAD(buffers);
- static LIST_HEAD(states);
- static spinlock_t extent_buffers_lock;
- static DEFINE_SPINLOCK(state_lock);
- static int nr_extent_buffers;
- #define MAX_EXTENT_BUFFER_CACHE 128
- struct tree_entry {
- u64 start;
- u64 end;
- int in_tree;
- struct rb_node rb_node;
- };
- void __init extent_map_init(void)
- {
- extent_map_cache = btrfs_cache_create("extent_map",
- sizeof(struct extent_map), 0,
- NULL);
- extent_state_cache = btrfs_cache_create("extent_state",
- sizeof(struct extent_state), 0,
- NULL);
- extent_buffer_cache = btrfs_cache_create("extent_buffers",
- sizeof(struct extent_buffer), 0,
- NULL);
- spin_lock_init(&extent_buffers_lock);
- }
- void __exit extent_map_exit(void)
- {
- struct extent_buffer *eb;
- struct extent_state *state;
- while (!list_empty(&extent_buffers)) {
- eb = list_entry(extent_buffers.next,
- struct extent_buffer, list);
- list_del(&eb->list);
- kmem_cache_free(extent_buffer_cache, eb);
- }
- while (!list_empty(&states)) {
- state = list_entry(states.next, struct extent_state, list);
- printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
- list_del(&state->list);
- kmem_cache_free(extent_state_cache, state);
- }
- while (!list_empty(&buffers)) {
- eb = list_entry(buffers.next,
- struct extent_buffer, leak_list);
- printk("buffer leak start %Lu len %lu return %lX\n", eb->start, eb->len, eb->alloc_addr);
- list_del(&eb->leak_list);
- kmem_cache_free(extent_buffer_cache, eb);
- }
- if (extent_map_cache)
- kmem_cache_destroy(extent_map_cache);
- if (extent_state_cache)
- kmem_cache_destroy(extent_state_cache);
- if (extent_buffer_cache)
- kmem_cache_destroy(extent_buffer_cache);
- }
- void extent_map_tree_init(struct extent_map_tree *tree,
- struct address_space *mapping, gfp_t mask)
- {
- tree->map.rb_node = NULL;
- tree->state.rb_node = NULL;
- tree->ops = NULL;
- rwlock_init(&tree->lock);
- tree->mapping = mapping;
- }
- EXPORT_SYMBOL(extent_map_tree_init);
- struct extent_map *alloc_extent_map(gfp_t mask)
- {
- struct extent_map *em;
- em = kmem_cache_alloc(extent_map_cache, mask);
- if (!em || IS_ERR(em))
- return em;
- em->in_tree = 0;
- atomic_set(&em->refs, 1);
- return em;
- }
- EXPORT_SYMBOL(alloc_extent_map);
- void free_extent_map(struct extent_map *em)
- {
- if (!em)
- return;
- if (atomic_dec_and_test(&em->refs)) {
- WARN_ON(em->in_tree);
- kmem_cache_free(extent_map_cache, em);
- }
- }
- EXPORT_SYMBOL(free_extent_map);
- struct extent_state *alloc_extent_state(gfp_t mask)
- {
- struct extent_state *state;
- unsigned long flags;
- state = kmem_cache_alloc(extent_state_cache, mask);
- if (!state || IS_ERR(state))
- return state;
- state->state = 0;
- state->in_tree = 0;
- state->private = 0;
- spin_lock_irqsave(&state_lock, flags);
- list_add(&state->list, &states);
- spin_unlock_irqrestore(&state_lock, flags);
- atomic_set(&state->refs, 1);
- init_waitqueue_head(&state->wq);
- return state;
- }
- EXPORT_SYMBOL(alloc_extent_state);
- void free_extent_state(struct extent_state *state)
- {
- unsigned long flags;
- if (!state)
- return;
- if (atomic_dec_and_test(&state->refs)) {
- WARN_ON(state->in_tree);
- spin_lock_irqsave(&state_lock, flags);
- list_del(&state->list);
- spin_unlock_irqrestore(&state_lock, flags);
- kmem_cache_free(extent_state_cache, state);
- }
- }
- EXPORT_SYMBOL(free_extent_state);
- static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
- struct rb_node *node)
- {
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct tree_entry *entry;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct tree_entry, rb_node);
- if (offset < entry->start)
- p = &(*p)->rb_left;
- else if (offset > entry->end)
- p = &(*p)->rb_right;
- else
- return parent;
- }
- entry = rb_entry(node, struct tree_entry, rb_node);
- entry->in_tree = 1;
- rb_link_node(node, parent, p);
- rb_insert_color(node, root);
- return NULL;
- }
- static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
- struct rb_node **prev_ret)
- {
- struct rb_node *n = root->rb_node;
- struct rb_node *prev = NULL;
- struct tree_entry *entry;
- struct tree_entry *prev_entry = NULL;
- while (n) {
- entry = rb_entry(n, struct tree_entry, rb_node);
- prev = n;
- prev_entry = entry;
- if (offset < entry->start)
- n = n->rb_left;
- else if (offset > entry->end)
- n = n->rb_right;
- else
- return n;
- }
- if (!prev_ret)
- return NULL;
- while (prev && offset > prev_entry->end) {
- prev = rb_next(prev);
- prev_entry = rb_entry(prev, struct tree_entry, rb_node);
- }
- *prev_ret = prev;
- return NULL;
- }
- static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
- {
- struct rb_node *prev;
- struct rb_node *ret;
- ret = __tree_search(root, offset, &prev);
- if (!ret)
- return prev;
- return ret;
- }
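- /*
- * illustrative note (not in the original): entries are keyed on their end
- * offset, so tree_search() returns the first entry whose range ends at or
- * after 'offset'. e.g. with [0,4095] and [8192,12287] in the tree,
- * tree_search(root, 5000) yields the [8192,12287] node.
- */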
- static int tree_delete(struct rb_root *root, u64 offset)
- {
- struct rb_node *node;
- struct tree_entry *entry;
- node = __tree_search(root, offset, NULL);
- if (!node)
- return -ENOENT;
- entry = rb_entry(node, struct tree_entry, rb_node);
- entry->in_tree = 0;
- rb_erase(node, root);
- return 0;
- }
- /*
- * add_extent_mapping tries a simple backward merge with existing
- * mappings. The extent_map struct passed in will be inserted into
- * the tree directly (no copies made, just a reference taken).
- */
- int add_extent_mapping(struct extent_map_tree *tree,
- struct extent_map *em)
- {
- int ret = 0;
- struct extent_map *prev = NULL;
- struct rb_node *rb;
- write_lock_irq(&tree->lock);
- rb = tree_insert(&tree->map, em->end, &em->rb_node);
- if (rb) {
- prev = rb_entry(rb, struct extent_map, rb_node);
- printk("found extent map %Lu %Lu on insert of %Lu %Lu\n", prev->start, prev->end, em->start, em->end);
- ret = -EEXIST;
- goto out;
- }
- atomic_inc(&em->refs);
- if (em->start != 0) {
- rb = rb_prev(&em->rb_node);
- if (rb)
- prev = rb_entry(rb, struct extent_map, rb_node);
- if (prev && prev->end + 1 == em->start &&
- ((em->block_start == EXTENT_MAP_HOLE &&
- prev->block_start == EXTENT_MAP_HOLE) ||
- (em->block_start == prev->block_end + 1))) {
- em->start = prev->start;
- em->block_start = prev->block_start;
- rb_erase(&prev->rb_node, &tree->map);
- prev->in_tree = 0;
- free_extent_map(prev);
- }
- }
- out:
- write_unlock_irq(&tree->lock);
- return ret;
- }
- EXPORT_SYMBOL(add_extent_mapping);
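- /*
- * usage sketch (illustrative only; the offsets are made up): a caller
- * fills in a freshly allocated extent_map, inserts it, and then drops
- * its own reference, since add_extent_mapping takes one for the tree:
- *
- * struct extent_map *em = alloc_extent_map(GFP_NOFS);
- * em->start = 0;
- * em->end = 4095;
- * em->block_start = 8192;
- * em->block_end = 12287;
- * ret = add_extent_mapping(tree, em);
- * free_extent_map(em);
- */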
- /*
- * lookup_extent_mapping returns the first extent_map struct in the
- * tree that intersects the [start, end] (inclusive) range. There may
- * be additional objects in the tree that intersect, so check the object
- * returned carefully to make sure you don't need additional lookups.
- */
- struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
- u64 start, u64 end)
- {
- struct extent_map *em;
- struct rb_node *rb_node;
- read_lock_irq(&tree->lock);
- rb_node = tree_search(&tree->map, start);
- if (!rb_node) {
- em = NULL;
- goto out;
- }
- if (IS_ERR(rb_node)) {
- em = ERR_PTR(PTR_ERR(rb_node));
- goto out;
- }
- em = rb_entry(rb_node, struct extent_map, rb_node);
- if (em->end < start || em->start > end) {
- em = NULL;
- goto out;
- }
- atomic_inc(&em->refs);
- out:
- read_unlock_irq(&tree->lock);
- return em;
- }
- EXPORT_SYMBOL(lookup_extent_mapping);
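- /*
- * usage sketch (illustrative, not in the original): the map comes back
- * with a reference held, so the caller must drop it when done:
- *
- * em = lookup_extent_mapping(tree, start, end);
- * if (em && !IS_ERR(em)) {
- * ... use em->block_start, em->end ...
- * free_extent_map(em);
- * }
- */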
- /*
- * removes an extent_map struct from the tree. No reference counts are
- * dropped, and no checks are done to see if the range is in use
- */
- int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
- {
- int ret;
- write_lock_irq(&tree->lock);
- ret = tree_delete(&tree->map, em->end);
- write_unlock_irq(&tree->lock);
- return ret;
- }
- EXPORT_SYMBOL(remove_extent_mapping);
- /*
- * utility function to look for merge candidates inside a given range.
- * Any extents with matching state are merged together into a single
- * extent in the tree. Extents with EXTENT_IO in their state field
- * are not merged because the end_io handlers need to be able to do
- * operations on them without sleeping (or doing allocations/splits).
- *
- * This should be called with the tree lock held.
- */
- static int merge_state(struct extent_map_tree *tree,
- struct extent_state *state)
- {
- struct extent_state *other;
- struct rb_node *other_node;
- if (state->state & EXTENT_IOBITS)
- return 0;
- other_node = rb_prev(&state->rb_node);
- if (other_node) {
- other = rb_entry(other_node, struct extent_state, rb_node);
- if (other->end == state->start - 1 &&
- other->state == state->state) {
- state->start = other->start;
- other->in_tree = 0;
- rb_erase(&other->rb_node, &tree->state);
- free_extent_state(other);
- }
- }
- other_node = rb_next(&state->rb_node);
- if (other_node) {
- other = rb_entry(other_node, struct extent_state, rb_node);
- if (other->start == state->end + 1 &&
- other->state == state->state) {
- other->start = state->start;
- state->in_tree = 0;
- rb_erase(&state->rb_node, &tree->state);
- free_extent_state(state);
- }
- }
- return 0;
- }
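- /*
- * worked example (illustrative): if [0,4095] and [4096,8191] are left
- * with identical state words after a clear, merge_state() collapses them
- * into a single [0,8191] record and frees the leftover struct.
- */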
- /*
- * insert an extent_state struct into the tree. 'bits' are set on the
- * struct before it is inserted.
- *
- * This may return -EEXIST if the extent is already there, in which case the
- * state struct is freed.
- *
- * The tree lock is not taken internally. This is a utility function and
- * probably isn't what you want to call (see set/clear_extent_bit).
- */
- static int insert_state(struct extent_map_tree *tree,
- struct extent_state *state, u64 start, u64 end,
- int bits)
- {
- struct rb_node *node;
- if (end < start) {
- printk("end < start %Lu %Lu\n", end, start);
- WARN_ON(1);
- }
- state->state |= bits;
- state->start = start;
- state->end = end;
- node = tree_insert(&tree->state, end, &state->rb_node);
- if (node) {
- struct extent_state *found;
- found = rb_entry(node, struct extent_state, rb_node);
- printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
- free_extent_state(state);
- return -EEXIST;
- }
- merge_state(tree, state);
- return 0;
- }
- /*
- * split a given extent state struct in two, inserting the preallocated
- * struct 'prealloc' as the newly created second half. 'split' indicates an
- * offset inside 'orig' where it should be split.
- *
- * Before calling, the tree has 'orig' at [orig->start, orig->end].
- * After calling, there are two extent state structs in the tree:
- * prealloc: [orig->start, split - 1]
- * orig:     [split, orig->end]
- *
- * The tree locks are not taken by this function. They need to be held
- * by the caller.
- */
- static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
- struct extent_state *prealloc, u64 split)
- {
- struct rb_node *node;
- prealloc->start = orig->start;
- prealloc->end = split - 1;
- prealloc->state = orig->state;
- orig->start = split;
- node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
- if (node) {
- struct extent_state *found;
- found = rb_entry(node, struct extent_state, rb_node);
- printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
- free_extent_state(prealloc);
- return -EEXIST;
- }
- return 0;
- }
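- /*
- * worked example (illustrative): splitting [0,8191] at 4096 leaves
- * prealloc covering [0,4095] and orig covering [4096,8191], both with
- * the original state bits.
- */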
- /*
- * utility function to clear some bits in an extent state struct.
- * it will optionally wake up anyone waiting on this state (wake == 1), or
- * forcibly remove the state from the tree (delete == 1).
- *
- * If no bits are set on the state struct after clearing things, the
- * struct is freed and removed from the tree
- */
- static int clear_state_bit(struct extent_map_tree *tree,
- struct extent_state *state, int bits, int wake,
- int delete)
- {
- int ret = state->state & bits;
- state->state &= ~bits;
- if (wake)
- wake_up(&state->wq);
- if (delete || state->state == 0) {
- if (state->in_tree) {
- rb_erase(&state->rb_node, &tree->state);
- state->in_tree = 0;
- free_extent_state(state);
- } else {
- WARN_ON(1);
- }
- } else {
- merge_state(tree, state);
- }
- return ret;
- }
- /*
- * clear some bits on a range in the tree. This may require splitting
- * or inserting elements in the tree, so the gfp mask is used to
- * indicate which allocations or sleeping are allowed.
- *
- * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
- * the given range from the tree regardless of state (ie for truncate).
- *
- * the range [start, end] is inclusive.
- *
- * This takes the tree lock, and returns < 0 on error, > 0 if any of the
- * bits were already set, or zero if none of the bits were already set.
- */
- int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
- int bits, int wake, int delete, gfp_t mask)
- {
- struct extent_state *state;
- struct extent_state *prealloc = NULL;
- struct rb_node *node;
- unsigned long flags;
- int err;
- int set = 0;
- again:
- if (!prealloc && (mask & __GFP_WAIT)) {
- prealloc = alloc_extent_state(mask);
- if (!prealloc)
- return -ENOMEM;
- }
- write_lock_irqsave(&tree->lock, flags);
- /*
- * this search will find the extents that end after
- * our range starts
- */
- node = tree_search(&tree->state, start);
- if (!node)
- goto out;
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->start > end)
- goto out;
- WARN_ON(state->end < start);
- /*
- * | ---- desired range ---- |
- * | state | or
- * | ------------- state -------------- |
- *
- * We need to split the extent we found, and may flip
- * bits on second half.
- *
- * If the extent we found extends past our range, we
- * just split and search again. It'll get split again
- * the next time though.
- *
- * If the extent we found is inside our range, we clear
- * the desired bit on it.
- */
- if (state->start < start) {
- err = split_state(tree, state, prealloc, start);
- BUG_ON(err == -EEXIST);
- prealloc = NULL;
- if (err)
- goto out;
- if (state->end <= end) {
- start = state->end + 1;
- set |= clear_state_bit(tree, state, bits,
- wake, delete);
- } else {
- start = state->start;
- }
- goto search_again;
- }
- /*
- * | ---- desired range ---- |
- * | state |
- * We need to split the extent, and clear the bit
- * on the first half
- */
- if (state->start <= end && state->end > end) {
- err = split_state(tree, state, prealloc, end + 1);
- BUG_ON(err == -EEXIST);
- if (wake)
- wake_up(&state->wq);
- set |= clear_state_bit(tree, prealloc, bits,
- wake, delete);
- prealloc = NULL;
- goto out;
- }
- start = state->end + 1;
- set |= clear_state_bit(tree, state, bits, wake, delete);
- goto search_again;
- out:
- write_unlock_irqrestore(&tree->lock, flags);
- if (prealloc)
- free_extent_state(prealloc);
- return set;
- search_again:
- if (start > end)
- goto out;
- write_unlock_irqrestore(&tree->lock, flags);
- if (mask & __GFP_WAIT)
- cond_resched();
- goto again;
- }
- EXPORT_SYMBOL(clear_extent_bit);
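- /*
- * example (illustrative): clear_extent_bit(tree, 0, 4095, EXTENT_DIRTY,
- * 0, 0, GFP_NOFS) returns nonzero if any part of [0,4095] had
- * EXTENT_DIRTY set, 0 if none did, and -ENOMEM if the preallocated
- * state could not be allocated.
- */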
- static int wait_on_state(struct extent_map_tree *tree,
- struct extent_state *state)
- {
- DEFINE_WAIT(wait);
- prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
- read_unlock_irq(&tree->lock);
- schedule();
- read_lock_irq(&tree->lock);
- finish_wait(&state->wq, &wait);
- return 0;
- }
- /*
- * waits for one or more bits to clear on a range in the state tree.
- * The range [start, end] is inclusive.
- * The tree lock is taken by this function
- */
- int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
- {
- struct extent_state *state;
- struct rb_node *node;
- read_lock_irq(&tree->lock);
- again:
- while (1) {
- /*
- * this search will find all the extents that end after
- * our range starts
- */
- node = tree_search(&tree->state, start);
- if (!node)
- break;
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->start > end)
- goto out;
- if (state->state & bits) {
- start = state->start;
- atomic_inc(&state->refs);
- wait_on_state(tree, state);
- free_extent_state(state);
- goto again;
- }
- start = state->end + 1;
- if (start > end)
- break;
- if (need_resched()) {
- read_unlock_irq(&tree->lock);
- cond_resched();
- read_lock_irq(&tree->lock);
- }
- }
- out:
- read_unlock_irq(&tree->lock);
- return 0;
- }
- EXPORT_SYMBOL(wait_extent_bit);
- /*
- * set some bits on a range in the tree. This may require allocations
- * or sleeping, so the gfp mask is used to indicate what is allowed.
- *
- * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
- * range already has the desired bits set. The start of the existing
- * range is returned in failed_start in this case.
- *
- * [start, end] is inclusive
- * This takes the tree lock.
- */
- int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
- int exclusive, u64 *failed_start, gfp_t mask)
- {
- struct extent_state *state;
- struct extent_state *prealloc = NULL;
- struct rb_node *node;
- unsigned long flags;
- int err = 0;
- int set;
- u64 last_start;
- u64 last_end;
- again:
- if (!prealloc && (mask & __GFP_WAIT)) {
- prealloc = alloc_extent_state(mask);
- if (!prealloc)
- return -ENOMEM;
- }
- write_lock_irqsave(&tree->lock, flags);
- /*
- * this search will find all the extents that end after
- * our range starts.
- */
- node = tree_search(&tree->state, start);
- if (!node) {
- err = insert_state(tree, prealloc, start, end, bits);
- prealloc = NULL;
- BUG_ON(err == -EEXIST);
- goto out;
- }
- state = rb_entry(node, struct extent_state, rb_node);
- last_start = state->start;
- last_end = state->end;
- /*
- * | ---- desired range ---- |
- * | state |
- *
- * Just lock what we found and keep going
- */
- if (state->start == start && state->end <= end) {
- set = state->state & bits;
- if (set && exclusive) {
- *failed_start = state->start;
- err = -EEXIST;
- goto out;
- }
- state->state |= bits;
- start = state->end + 1;
- merge_state(tree, state);
- goto search_again;
- }
- /*
- * | ---- desired range ---- |
- * | state |
- * or
- * | ------------- state -------------- |
- *
- * We need to split the extent we found, and may flip bits on
- * second half.
- *
- * If the extent we found extends past our
- * range, we just split and search again. It'll get split
- * again the next time though.
- *
- * If the extent we found is inside our range, we set the
- * desired bit on it.
- */
- if (state->start < start) {
- set = state->state & bits;
- if (exclusive && set) {
- *failed_start = start;
- err = -EEXIST;
- goto out;
- }
- err = split_state(tree, state, prealloc, start);
- BUG_ON(err == -EEXIST);
- prealloc = NULL;
- if (err)
- goto out;
- if (state->end <= end) {
- state->state |= bits;
- start = state->end + 1;
- merge_state(tree, state);
- } else {
- start = state->start;
- }
- goto search_again;
- }
- /*
- * | ---- desired range ---- |
- * | state | or | state |
- *
- * There's a hole, we need to insert something in it and
- * ignore the extent we found.
- */
- if (state->start > start) {
- u64 this_end;
- if (end < last_start)
- this_end = end;
- else
- this_end = last_start - 1;
- err = insert_state(tree, prealloc, start, this_end,
- bits);
- prealloc = NULL;
- BUG_ON(err == -EEXIST);
- if (err)
- goto out;
- start = this_end + 1;
- goto search_again;
- }
- /*
- * | ---- desired range ---- |
- * | state |
- * We need to split the extent, and set the bit
- * on the first half
- */
- if (state->start <= end && state->end > end) {
- set = state->state & bits;
- if (exclusive && set) {
- *failed_start = start;
- err = -EEXIST;
- goto out;
- }
- err = split_state(tree, state, prealloc, end + 1);
- BUG_ON(err == -EEXIST);
- prealloc->state |= bits;
- merge_state(tree, prealloc);
- prealloc = NULL;
- goto out;
- }
- goto search_again;
- out:
- write_unlock_irqrestore(&tree->lock, flags);
- if (prealloc)
- free_extent_state(prealloc);
- return err;
- search_again:
- if (start > end)
- goto out;
- write_unlock_irqrestore(&tree->lock, flags);
- if (mask & __GFP_WAIT)
- cond_resched();
- goto again;
- }
- EXPORT_SYMBOL(set_extent_bit);
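- /*
- * example (illustrative): exclusive mode is what lock_extent() below is
- * built on. set_extent_bit(tree, 0, 4095, EXTENT_LOCKED, 1,
- * &failed_start, GFP_NOFS) returns -EEXIST and stores the start of the
- * already-locked region in failed_start if any part of [0,4095] was
- * locked.
- */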
- /* wrappers around set/clear extent bit */
- int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
- gfp_t mask)
- {
- return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
- mask);
- }
- EXPORT_SYMBOL(set_extent_dirty);
- int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
- int bits, gfp_t mask)
- {
- return set_extent_bit(tree, start, end, bits, 0, NULL,
- mask);
- }
- EXPORT_SYMBOL(set_extent_bits);
- int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
- int bits, gfp_t mask)
- {
- return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
- }
- EXPORT_SYMBOL(clear_extent_bits);
- int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
- gfp_t mask)
- {
- return set_extent_bit(tree, start, end,
- EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
- mask);
- }
- EXPORT_SYMBOL(set_extent_delalloc);
- int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
- gfp_t mask)
- {
- return clear_extent_bit(tree, start, end,
- EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
- }
- EXPORT_SYMBOL(clear_extent_dirty);
- int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
- gfp_t mask)
- {
- return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
- mask);
- }
- EXPORT_SYMBOL(set_extent_new);
- int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
- gfp_t mask)
- {
- return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
- }
- EXPORT_SYMBOL(clear_extent_new);
- int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
- gfp_t mask)
- {
- return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
- mask);
- }
- EXPORT_SYMBOL(set_extent_uptodate);
- int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
- gfp_t mask)
- {
- return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
- }
- EXPORT_SYMBOL(clear_extent_uptodate);
- int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
- gfp_t mask)
- {
- return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
- 0, NULL, mask);
- }
- EXPORT_SYMBOL(set_extent_writeback);
- int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
- gfp_t mask)
- {
- return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
- }
- EXPORT_SYMBOL(clear_extent_writeback);
- int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
- {
- return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
- }
- EXPORT_SYMBOL(wait_on_extent_writeback);
- /*
- * locks a range in ascending order, waiting for any locked regions
- * it hits on the way. [start,end] are inclusive, and this will sleep.
- */
- int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
- {
- int err;
- u64 failed_start;
- while (1) {
- err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
- &failed_start, mask);
- if (err == -EEXIST && (mask & __GFP_WAIT)) {
- wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
- start = failed_start;
- } else {
- break;
- }
- WARN_ON(start > end);
- }
- return err;
- }
- EXPORT_SYMBOL(lock_extent);
- int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
- gfp_t mask)
- {
- return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
- }
- EXPORT_SYMBOL(unlock_extent);
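- /*
- * usage sketch (illustrative): lock and unlock always pair on the same
- * inclusive byte range:
- *
- * lock_extent(tree, start, end, GFP_NOFS);
- * ... read or modify the range ...
- * unlock_extent(tree, start, end, GFP_NOFS);
- */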
- /*
- * helper function to set pages and extents in the tree dirty
- */
- int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
- {
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = end >> PAGE_CACHE_SHIFT;
- struct page *page;
- while (index <= end_index) {
- page = find_get_page(tree->mapping, index);
- BUG_ON(!page);
- __set_page_dirty_nobuffers(page);
- page_cache_release(page);
- index++;
- }
- set_extent_dirty(tree, start, end, GFP_NOFS);
- return 0;
- }
- EXPORT_SYMBOL(set_range_dirty);
- /*
- * helper function to set both pages and extents in the tree writeback
- */
- int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
- {
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = end >> PAGE_CACHE_SHIFT;
- struct page *page;
- while (index <= end_index) {
- page = find_get_page(tree->mapping, index);
- BUG_ON(!page);
- set_page_writeback(page);
- page_cache_release(page);
- index++;
- }
- set_extent_writeback(tree, start, end, GFP_NOFS);
- return 0;
- }
- EXPORT_SYMBOL(set_range_writeback);
- int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, int bits)
- {
- struct rb_node *node;
- struct extent_state *state;
- int ret = 1;
- write_lock_irq(&tree->lock);
- /*
- * this search will find all the extents that end after
- * our range starts.
- */
- node = tree_search(&tree->state, start);
- if (!node || IS_ERR(node)) {
- goto out;
- }
- while (1) {
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->state & bits) {
- *start_ret = state->start;
- *end_ret = state->end;
- ret = 0;
- break;
- }
- node = rb_next(node);
- if (!node)
- break;
- }
- out:
- write_unlock_irq(&tree->lock);
- return ret;
- }
- EXPORT_SYMBOL(find_first_extent_bit);
- u64 find_lock_delalloc_range(struct extent_map_tree *tree,
- u64 start, u64 lock_start, u64 *end, u64 max_bytes)
- {
- struct rb_node *node;
- struct extent_state *state;
- u64 cur_start = start;
- u64 found = 0;
- u64 total_bytes = 0;
- write_lock_irq(&tree->lock);
- /*
- * this search will find all the extents that end after
- * our range starts.
- */
- search_again:
- node = tree_search(&tree->state, cur_start);
- if (!node || IS_ERR(node)) {
- goto out;
- }
- while (1) {
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->start != cur_start) {
- goto out;
- }
- if (!(state->state & EXTENT_DELALLOC)) {
- goto out;
- }
- if (state->start >= lock_start) {
- if (state->state & EXTENT_LOCKED) {
- DEFINE_WAIT(wait);
- atomic_inc(&state->refs);
- prepare_to_wait(&state->wq, &wait,
- TASK_UNINTERRUPTIBLE);
- write_unlock_irq(&tree->lock);
- schedule();
- write_lock_irq(&tree->lock);
- finish_wait(&state->wq, &wait);
- free_extent_state(state);
- goto search_again;
- }
- state->state |= EXTENT_LOCKED;
- }
- found++;
- *end = state->end;
- cur_start = state->end + 1;
- node = rb_next(node);
- if (!node)
- break;
- total_bytes += state->end - state->start + 1;
- if (total_bytes >= max_bytes)
- break;
- }
- out:
- write_unlock_irq(&tree->lock);
- return found;
- }
- /*
- * helper function to lock both pages and extents in the tree.
- * pages must be locked first.
- */
- int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
- {
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = end >> PAGE_CACHE_SHIFT;
- struct page *page;
- int err;
- while (index <= end_index) {
- page = grab_cache_page(tree->mapping, index);
- if (!page) {
- err = -ENOMEM;
- goto failed;
- }
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto failed;
- }
- index++;
- }
- lock_extent(tree, start, end, GFP_NOFS);
- return 0;
- failed:
- /*
- * we failed above in getting the page at 'index', so we undo here
- * up to but not including the page at 'index'
- */
- end_index = index;
- index = start >> PAGE_CACHE_SHIFT;
- while (index < end_index) {
- page = find_get_page(tree->mapping, index);
- unlock_page(page);
- page_cache_release(page);
- index++;
- }
- return err;
- }
- EXPORT_SYMBOL(lock_range);
- /*
- * helper function to unlock both pages and extents in the tree.
- */
- int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
- {
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- unsigned long end_index = end >> PAGE_CACHE_SHIFT;
- struct page *page;
- while (index <= end_index) {
- page = find_get_page(tree->mapping, index);
- unlock_page(page);
- page_cache_release(page);
- index++;
- }
- unlock_extent(tree, start, end, GFP_NOFS);
- return 0;
- }
- EXPORT_SYMBOL(unlock_range);
- int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
- {
- struct rb_node *node;
- struct extent_state *state;
- int ret = 0;
- write_lock_irq(&tree->lock);
- /*
- * this search will find all the extents that end after
- * our range starts.
- */
- node = tree_search(&tree->state, start);
- if (!node || IS_ERR(node)) {
- ret = -ENOENT;
- goto out;
- }
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->start != start) {
- ret = -ENOENT;
- goto out;
- }
- state->private = private;
- out:
- write_unlock_irq(&tree->lock);
- return ret;
- }
- int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
- {
- struct rb_node *node;
- struct extent_state *state;
- int ret = 0;
- read_lock_irq(&tree->lock);
- /*
- * this search will find all the extents that end after
- * our range starts.
- */
- node = tree_search(&tree->state, start);
- if (!node || IS_ERR(node)) {
- ret = -ENOENT;
- goto out;
- }
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->start != start) {
- ret = -ENOENT;
- goto out;
- }
- *private = state->private;
- out:
- read_unlock_irq(&tree->lock);
- return ret;
- }
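- /*
- * usage sketch (illustrative; what the u64 means is up to the caller):
- *
- * set_state_private(tree, start, val); stashes a value with the state
- * get_state_private(tree, start, &val); reads it back
- *
- * both return -ENOENT when no state record begins exactly at 'start'.
- */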
- /*
- * searches a range in the state tree for a given mask.
- * If 'filled' == 1, this returns 1 only if every extent in the range
- * has the bits set. Otherwise, 1 is returned if any bit in the
- * range is found set.
- */
- int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
- int bits, int filled)
- {
- struct extent_state *state = NULL;
- struct rb_node *node;
- int bitset = 0;
- read_lock_irq(&tree->lock);
- node = tree_search(&tree->state, start);
- while (node && start <= end) {
- state = rb_entry(node, struct extent_state, rb_node);
- if (state->start > end)
- break;
- if (filled && state->start > start) {
- bitset = 0;
- break;
- }
- if (state->state & bits) {
- bitset = 1;
- if (!filled)
- break;
- } else if (filled) {
- bitset = 0;
- break;
- }
- start = state->end + 1;
- if (start > end)
- break;
- node = rb_next(node);
- }
- read_unlock_irq(&tree->lock);
- return bitset;
- }
- EXPORT_SYMBOL(test_range_bit);
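- /*
- * example (illustrative): with [0,4095] dirty and [4096,8191] clean,
- * test_range_bit(tree, 0, 8191, EXTENT_DIRTY, 1) returns 0 because not
- * every byte of the range is dirty, while the same call with filled == 0
- * returns 1 because some byte is.
- */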
- /*
- * helper function to set a given page up to date if all the
- * extents in the tree for that page are up to date
- */
- static int check_page_uptodate(struct extent_map_tree *tree,
- struct page *page)
- {
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
- u64 end = start + PAGE_CACHE_SIZE - 1;
- if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
- SetPageUptodate(page);
- return 0;
- }
- /*
- * helper function to unlock a page if all the extents in the tree
- * for that page are unlocked
- */
- static int check_page_locked(struct extent_map_tree *tree,
- struct page *page)
- {
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
- u64 end = start + PAGE_CACHE_SIZE - 1;
- if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
- unlock_page(page);
- return 0;
- }
- /*
- * helper function to end page writeback if all the extents
- * in the tree for that page are done with writeback
- */
- static int check_page_writeback(struct extent_map_tree *tree,
- struct page *page)
- {
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
- u64 end = start + PAGE_CACHE_SIZE - 1;
- if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
- end_page_writeback(page);
- return 0;
- }
- /* lots and lots of room for performance fixes in the end_bio funcs */
- /*
- * after a writepage IO is done, we need to:
- * clear the uptodate bits on error
- * clear the writeback bits in the extent tree for this IO
- * end_page_writeback if the page has no more pending IO
- *
- * Scheduling is not allowed, so the extent state tree is expected
- * to have one and only one object corresponding to this IO.
- */
- static int end_bio_extent_writepage(struct bio *bio,
- unsigned int bytes_done, int err)
- {
- const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- struct extent_map_tree *tree = bio->bi_private;
- u64 start;
- u64 end;
- int whole_page;
- if (bio->bi_size)
- return 1;
- do {
- struct page *page = bvec->bv_page;
- start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
- end = start + bvec->bv_len - 1;
- if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
- whole_page = 1;
- else
- whole_page = 0;
- if (--bvec >= bio->bi_io_vec)
- prefetchw(&bvec->bv_page->flags);
- if (!uptodate) {
- clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
- ClearPageUptodate(page);
- SetPageError(page);
- }
- clear_extent_writeback(tree, start, end, GFP_ATOMIC);
- if (whole_page)
- end_page_writeback(page);
- else
- check_page_writeback(tree, page);
- if (tree->ops && tree->ops->writepage_end_io_hook)
- tree->ops->writepage_end_io_hook(page, start, end);
- } while (bvec >= bio->bi_io_vec);
- bio_put(bio);
- return 0;
- }
- /*
- * after a readpage IO is done, we need to:
- * clear the uptodate bits on error
- * set the uptodate bits if things worked
- * set the page up to date if all extents in the tree are uptodate
- * clear the lock bit in the extent tree
- * unlock the page if there are no other extents locked for it
- *
- * Scheduling is not allowed, so the extent state tree is expected
- * to have one and only one object corresponding to this IO.
- */
- static int end_bio_extent_readpage(struct bio *bio,
- unsigned int bytes_done, int err)
- {
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- struct extent_map_tree *tree = bio->bi_private;
- u64 start;
- u64 end;
- int whole_page;
- int ret;
- if (bio->bi_size)
- return 1;
- do {
- struct page *page = bvec->bv_page;
- start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
- end = start + bvec->bv_len - 1;
- if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
- whole_page = 1;
- else
- whole_page = 0;
- if (--bvec >= bio->bi_io_vec)
- prefetchw(&bvec->bv_page->flags);
- if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
- ret = tree->ops->readpage_end_io_hook(page, start, end);
- if (ret)
- uptodate = 0;
- }
- if (uptodate) {
- set_extent_uptodate(tree, start, end, GFP_ATOMIC);
- if (whole_page)
- SetPageUptodate(page);
- else
- check_page_uptodate(tree, page);
- } else {
- ClearPageUptodate(page);
- SetPageError(page);
- }
- unlock_extent(tree, start, end, GFP_ATOMIC);
- if (whole_page)
- unlock_page(page);
- else
- check_page_locked(tree, page);
- } while (bvec >= bio->bi_io_vec);
- bio_put(bio);
- return 0;
- }
- /*
- * IO done from prepare_write is pretty simple: we just unlock
- * the structs in the extent tree when done, and set the uptodate bits
- * as appropriate.
- */
- static int end_bio_extent_preparewrite(struct bio *bio,
- unsigned int bytes_done, int err)
- {
- const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- struct extent_map_tree *tree = bio->bi_private;
- u64 start;
- u64 end;
- if (bio->bi_size)
- return 1;
- do {
- struct page *page = bvec->bv_page;
- start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
- end = start + bvec->bv_len - 1;
- if (--bvec >= bio->bi_io_vec)
- prefetchw(&bvec->bv_page->flags);
- if (uptodate) {
- set_extent_uptodate(tree, start, end, GFP_ATOMIC);
- } else {
- ClearPageUptodate(page);
- SetPageError(page);
- }
- unlock_extent(tree, start, end, GFP_ATOMIC);
- } while (bvec >= bio->bi_io_vec);
- bio_put(bio);
- return 0;
- }
- static int submit_extent_page(int rw, struct extent_map_tree *tree,
- struct page *page, sector_t sector,
- size_t size, unsigned long offset,
- struct block_device *bdev,
- bio_end_io_t end_io_func)
- {
- struct bio *bio;
- int ret = 0;
- bio = bio_alloc(GFP_NOIO, 1);
- bio->bi_sector = sector;
- bio->bi_bdev = bdev;
- bio->bi_io_vec[0].bv_page = page;
- bio->bi_io_vec[0].bv_len = size;
- bio->bi_io_vec[0].bv_offset = offset;
- bio->bi_vcnt = 1;
- bio->bi_idx = 0;
- bio->bi_size = size;
- bio->bi_end_io = end_io_func;
- bio->bi_private = tree;
- bio_get(bio);
- submit_bio(rw, bio);
- if (bio_flagged(bio, BIO_EOPNOTSUPP))
- ret = -EOPNOTSUPP;
- bio_put(bio);
- return ret;
- }
- void set_page_extent_mapped(struct page *page)
- {
- if (!PagePrivate(page)) {
- SetPagePrivate(page);
- WARN_ON(!page->mapping->a_ops->invalidatepage);
- set_page_private(page, 1);
- page_cache_get(page);
- }
- }
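- /*
- * note (illustrative): page->private is set to the constant 1 purely as
- * a marker that the page is managed by an extent tree; nothing hangs off
- * it the way buffer heads do elsewhere.
- */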
- /*
- * basic readpage implementation. Locked extent state structs are
- * inserted into the tree for the range being read, and are removed
- * again when the IO completes (by the end_io handlers)
- */
- int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
- get_extent_t *get_extent)
- {
- struct inode *inode = page->mapping->host;
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
- u64 page_end = start + PAGE_CACHE_SIZE - 1;
- u64 end;
- u64 cur = start;
- u64 extent_offset;
- u64 last_byte = i_size_read(inode);
- u64 block_start;
- u64 cur_end;
- sector_t sector;
- struct extent_map *em;
- struct block_device *bdev;
- int ret;
- int nr = 0;
- size_t page_offset = 0;
- size_t iosize;
- size_t blocksize = inode->i_sb->s_blocksize;
- set_page_extent_mapped(page);
- end = page_end;
- lock_extent(tree, start, end, GFP_NOFS);
- while (cur <= end) {
- if (cur >= last_byte) {
- iosize = PAGE_CACHE_SIZE - page_offset;
- zero_user_page(page, page_offset, iosize, KM_USER0);
- set_extent_uptodate(tree, cur, cur + iosize - 1,
- GFP_NOFS);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
- break;
- }
- em = get_extent(inode, page, page_offset, cur, end, 0);
- if (IS_ERR(em) || !em) {
- SetPageError(page);
- unlock_extent(tree, cur, end, GFP_NOFS);
- break;
- }
- extent_offset = cur - em->start;
- BUG_ON(em->end < cur);
- BUG_ON(end < cur);
- iosize = min(em->end - cur, end - cur) + 1;
- cur_end = min(em->end, end);
- iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
- sector = (em->block_start + extent_offset) >> 9;
- bdev = em->bdev;
- block_start = em->block_start;
- free_extent_map(em);
- em = NULL;
- /* we've found a hole, just zero and go on */
- if (block_start == EXTENT_MAP_HOLE) {
- zero_user_page(page, page_offset, iosize, KM_USER0);
- set_extent_uptodate(tree, cur, cur + iosize - 1,
- GFP_NOFS);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
- cur = cur + iosize;
- page_offset += iosize;
- continue;
- }
- /* the get_extent function already copied into the page */
- if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
- cur = cur + iosize;
- page_offset += iosize;
- continue;
- }
- ret = 0;
- if (tree->ops && tree->ops->readpage_io_hook) {
- ret = tree->ops->readpage_io_hook(page, cur,
- cur + iosize - 1);
- }
- if (!ret) {
- ret = submit_extent_page(READ, tree, page,
- sector, iosize, page_offset,
- bdev, end_bio_extent_readpage);
- }
- if (ret)
- SetPageError(page);
- cur = cur + iosize;
- page_offset += iosize;
- nr++;
- }
- if (!nr) {
- if (!PageError(page))
- SetPageUptodate(page);
- unlock_page(page);
- }
- return 0;
- }
- EXPORT_SYMBOL(extent_read_full_page);
- /*
- * the writepage semantics are similar to regular writepage. extent
- * records are inserted to lock ranges in the tree, and as dirty areas
- * are found, they are marked writeback. Then the lock bits are removed
- * and the end_io handler clears the writeback ranges
- */
- int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
- get_extent_t *get_extent,
- struct writeback_control *wbc)
- {
- struct inode *inode = page->mapping->host;
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
- u64 page_end = start + PAGE_CACHE_SIZE - 1;
- u64 end;
- u64 cur = start;
- u64 extent_offset;
- u64 last_byte = i_size_read(inode);
- u64 block_start;
- sector_t sector;
- struct extent_map *em;
- struct block_device *bdev;
- int ret;
- int nr = 0;
- size_t page_offset = 0;
- size_t iosize;
- size_t blocksize;
- loff_t i_size = i_size_read(inode);
- unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
- u64 nr_delalloc;
- u64 delalloc_end;
- WARN_ON(!PageLocked(page));
- if (page->index > end_index) {
- clear_extent_dirty(tree, start, page_end, GFP_NOFS);
- unlock_page(page);
- return 0;
- }
- if (page->index == end_index) {
- size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
- zero_user_page(page, offset,
- PAGE_CACHE_SIZE - offset, KM_USER0);
- }
- set_page_extent_mapped(page);
- lock_extent(tree, start, page_end, GFP_NOFS);
- nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
- &delalloc_end,
- 128 * 1024 * 1024);
- if (nr_delalloc) {
- tree->ops->fill_delalloc(inode, start, delalloc_end);
- if (delalloc_end >= page_end + 1) {
- clear_extent_bit(tree, page_end + 1, delalloc_end,
- EXTENT_LOCKED | EXTENT_DELALLOC,
- 1, 0, GFP_NOFS);
- }
- clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
- 0, 0, GFP_NOFS);
- if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
- printk("found delalloc bits after clear extent_bit\n");
- }
- } else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
- printk("found delalloc bits after find_delalloc_range returns 0\n");
- }
- end = page_end;
- if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
- printk("found delalloc bits after lock_extent\n");
- }
- if (last_byte <= start) {
- clear_extent_dirty(tree, start, page_end, GFP_NOFS);
- goto done;
- }
- set_extent_uptodate(tree, start, page_end, GFP_NOFS);
- blocksize = inode->i_sb->s_blocksize;
- while (cur <= end) {
- if (cur >= last_byte) {
- clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
- break;
- }
- em = get_extent(inode, page, page_offset, cur, end, 0);
- if (IS_ERR(em) || !em) {
- SetPageError(page);
- break;
- }
- extent_offset = cur - em->start;
- BUG_ON(em->end < cur);
- BUG_ON(end < cur);
- iosize = min(em->end - cur, end - cur) + 1;
- iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
- sector = (em->block_start + extent_offset) >> 9;
- bdev = em->bdev;
- block_start = em->block_start;
- free_extent_map(em);
- em = NULL;
- if (block_start == EXTENT_MAP_HOLE ||
- block_start == EXTENT_MAP_INLINE) {
- clear_extent_dirty(tree, cur,
- cur + iosize - 1, GFP_NOFS);
- cur = cur + iosize;
- page_offset += iosize;
- continue;
- }
- /* leave this out until we have a page_mkwrite call */
- if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
- EXTENT_DIRTY, 0)) {
- cur = cur + iosize;
- page_offset += iosize;
- continue;
- }
- clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
- if (tree->ops && tree->ops->writepage_io_hook) {
- ret = tree->ops->writepage_io_hook(page, cur,
- cur + iosize - 1);
- } else {
- ret = 0;
- }
- if (ret)
- SetPageError(page);
- else {
- set_range_writeback(tree, cur, cur + iosize - 1);
- ret = submit_extent_page(WRITE, tree, page, sector,
- iosize, page_offset, bdev,
- end_bio_extent_writepage);
- if (ret)
- SetPageError(page);
- }
- cur = cur + iosize;
- page_offset += iosize;
- nr++;
- }
- done:
- unlock_extent(tree, start, page_end, GFP_NOFS);
- unlock_page(page);
- return 0;
- }
- EXPORT_SYMBOL(extent_write_full_page);
- /*
- * basic invalidatepage code, this waits on any locked or writeback
- * ranges corresponding to the page, and then deletes any extent state
- * records from the tree
- */
- int extent_invalidatepage(struct extent_map_tree *tree,
- struct page *page, unsigned long offset)
- {
- u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
- u64 end = start + PAGE_CACHE_SIZE - 1;
- size_t blocksize = page->mapping->host->i_sb->s_blocksize;
- start += (offset + blocksize - 1) & ~(blocksize - 1);
- if (start > end)
- return 0;
- lock_extent(tree, start, end, GFP_NOFS);
- wait_on_extent_writeback(tree, start, end);
- clear_extent_bit(tree, start, end,
- EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
- 1, 1, GFP_NOFS);
- return 0;
- }
- EXPORT_SYMBOL(extent_invalidatepage);
- /*
- * simple commit_write call: mark the page dirty, and extend i_size if
- * the write ends past the old end of file
- */
- int extent_commit_write(struct extent_map_tree *tree,
- struct inode *inode, struct page *page,
- unsigned from, unsigned to)
- {
- loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
- set_page_extent_mapped(page);
- set_page_dirty(page);
- if (pos > inode->i_size) {
- i_size_write(inode, pos);
- mark_inode_dirty(inode);
- }
- return 0;
- }
- EXPORT_SYMBOL(extent_commit_write);
- int extent_prepare_write(struct extent_map_tree *tree,
- struct inode *inode, struct page *page,
- unsigned from, unsigned to, get_extent_t *get_extent)
- {
- u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
- u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
- u64 block_start;
- u64 orig_block_start;
- u64 block_end;
- u64 cur_end;
- struct extent_map *em;
- unsigned blocksize = 1 << inode->i_blkbits;
- size_t page_offset = 0;
- size_t block_off_start;
- size_t block_off_end;
- int err = 0;
- int iocount = 0;
- int ret = 0;
- int isnew;
- set_page_extent_mapped(page);
- block_start = (page_start + from) & ~((u64)blocksize - 1);
- block_end = (page_start + to - 1) | (blocksize - 1);
- orig_block_start = block_start;
- lock_extent(tree, page_start, page_end, GFP_NOFS);
- while (block_start <= block_end) {
- em = get_extent(inode, page, page_offset, block_start,
- block_end, 1);
- if (IS_ERR(em) || !em) {
- err = em ? PTR_ERR(em) : -EIO;
- goto err;
- }
- cur_end = min(block_end, em->end);
- block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
- block_off_end = block_off_start + blocksize;
- isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
- if (!PageUptodate(page) && isnew &&
- (block_off_end > to || block_off_start < from)) {
- void *kaddr;
- kaddr = kmap_atomic(page, KM_USER0);
- if (block_off_end > to)
- memset(kaddr + to, 0, block_off_end - to);
- if (block_off_start < from)
- memset(kaddr + block_off_start, 0,
- from - block_off_start);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
- }
- if (!isnew && !PageUptodate(page) &&
- (block_off_end > to || block_off_start < from) &&
- !test_range_bit(tree, block_start, cur_end,
- EXTENT_UPTODATE, 1)) {
- u64 sector;
- u64 extent_offset = block_start - em->start;
- size_t iosize;
- sector = (em->block_start + extent_offset) >> 9;
- iosize = (cur_end - block_start + blocksize - 1) &
- ~((u64)blocksize - 1);
- /*
- * we've already got the extent locked, but we
- * need to split the state such that our end_bio
- * handler can clear the lock.
- */
- set_extent_bit(tree, block_start,
- block_start + iosize - 1,
- EXTENT_LOCKED, 0, NULL, GFP_NOFS);
- ret = submit_extent_page(READ, tree, page,
- sector, iosize, page_offset, em->bdev,
- end_bio_extent_preparewrite);
- iocount++;
- block_start = block_start + iosize;
- } else {
- set_extent_uptodate(tree, block_start, cur_end,
- GFP_NOFS);
- unlock_extent(tree, block_start, cur_end, GFP_NOFS);
- block_start = cur_end + 1;
- }
- page_offset = block_start & (PAGE_CACHE_SIZE - 1);
- free_extent_map(em);
- }
- if (iocount) {
- wait_extent_bit(tree, orig_block_start,
- block_end, EXTENT_LOCKED);
- }
- check_page_uptodate(tree, page);
- err:
- /* FIXME, zero out newly allocated blocks on error */
- return err;
- }
- EXPORT_SYMBOL(extent_prepare_write);
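
The matching .prepare_write hook would delegate here, passing the fs-specific get_extent_t callback that maps a logical file range to an extent_map. A sketch under the same hypothetical names as above:

static int my_prepare_write(struct file *file, struct page *page,
			    unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	/* my_get_extent is the fs-supplied mapping callback (illustrative) */
	return extent_prepare_write(my_tree_of(inode), inode, page,
				    from, to, my_get_extent);
}
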
- /*
- * a helper for releasepage. As long as there are no locked extents
- * in the range corresponding to the page, both state records and extent
- * map records are removed
- */
- int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
- {
- struct extent_map *em;
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
- u64 end = start + PAGE_CACHE_SIZE - 1;
- u64 orig_start = start;
- int ret = 1;
- while (start <= end) {
- em = lookup_extent_mapping(tree, start, end);
- if (!em || IS_ERR(em))
- break;
- if (!test_range_bit(tree, em->start, em->end,
- EXTENT_LOCKED, 0)) {
- remove_extent_mapping(tree, em);
- /* once for the rb tree */
- free_extent_map(em);
- }
- start = em->end + 1;
- /* once for us */
- free_extent_map(em);
- }
- if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
- ret = 0;
- else
- clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
- 1, 1, GFP_NOFS);
- return ret;
- }
- EXPORT_SYMBOL(try_release_extent_mapping);
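
A .releasepage implementation can simply forward to this helper; a nonzero return tells the VM that the private state is gone and the page may be released. Sketch, same hypothetical my_tree_of():

static int my_releasepage(struct page *page, gfp_t gfp)
{
	struct extent_map_tree *tree = my_tree_of(page->mapping->host);

	return try_release_extent_mapping(tree, page);
}
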
- sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
- get_extent_t *get_extent)
- {
- struct inode *inode = mapping->host;
- u64 start = (u64)iblock << inode->i_blkbits;
- u64 end = start + (1 << inode->i_blkbits) - 1;
- struct extent_map *em;
- em = get_extent(inode, NULL, 0, start, end, 0);
- if (!em || IS_ERR(em))
- return 0;
- if (em->block_start == EXTENT_MAP_INLINE ||
- em->block_start == EXTENT_MAP_HOLE)
- return 0;
- return (em->block_start + start - em->start) >> inode->i_blkbits;
- }
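
extent_bmap() likewise slots straight into the .bmap address_space op; a sketch:

static sector_t my_bmap(struct address_space *mapping, sector_t block)
{
	/* my_get_extent as above (illustrative) */
	return extent_bmap(mapping, block, my_get_extent);
}
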
- static struct extent_buffer *__alloc_extent_buffer(gfp_t mask)
- {
- struct extent_buffer *eb = NULL;
- spin_lock(&extent_buffers_lock);
- if (!list_empty(&extent_buffers)) {
- eb = list_entry(extent_buffers.next, struct extent_buffer,
- list);
- list_del(&eb->list);
- WARN_ON(nr_extent_buffers == 0);
- nr_extent_buffers--;
- }
- spin_unlock(&extent_buffers_lock);
- if (eb) {
- memset(eb, 0, sizeof(*eb));
- } else {
- eb = kmem_cache_zalloc(extent_buffer_cache, mask);
- if (!eb)
- return NULL;
- }
- spin_lock(&extent_buffers_lock);
- list_add(&eb->leak_list, &buffers);
- spin_unlock(&extent_buffers_lock);
- return eb;
- }
- static void __free_extent_buffer(struct extent_buffer *eb)
- {
- spin_lock(&extent_buffers_lock);
- list_del_init(&eb->leak_list);
- spin_unlock(&extent_buffers_lock);
- if (nr_extent_buffers >= MAX_EXTENT_BUFFER_CACHE) {
- kmem_cache_free(extent_buffer_cache, eb);
- } else {
- spin_lock(&extent_buffers_lock);
- list_add(&eb->list, &extent_buffers);
- nr_extent_buffers++;
- spin_unlock(&extent_buffers_lock);
- }
- }
- static inline struct page *extent_buffer_page(struct extent_buffer *eb, int i)
- {
- struct page *p;
- if (i == 0)
- return eb->first_page;
- i += eb->start >> PAGE_CACHE_SHIFT;
- if (eb->last_page && eb->last_page->index == i)
- return eb->last_page;
- p = find_get_page(eb->first_page->mapping, i);
- /* the buffer already holds a reference on each of its pages, so
- * the extra reference from find_get_page can be dropped right away
- */
- page_cache_release(p);
- eb->last_page = p;
- return p;
- }
- static inline unsigned long num_extent_pages(u64 start, u64 len)
- {
- return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
- (start >> PAGE_CACHE_SHIFT);
- }
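
The count is the end page of the rounded-up range minus the start page. A worked example with 4K pages:

/* start = 4096, len = 12288: bytes 4096..16383 span pages 1..3
 * num_extent_pages = ((4096 + 12288 + 4095) >> 12) - (4096 >> 12)
 *                  = (20479 >> 12) - 1 = 4 - 1 = 3
 */
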
- struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
- u64 start, unsigned long len,
- gfp_t mask)
- {
- unsigned long num_pages = num_extent_pages(start, len);
- unsigned long i;
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- struct extent_buffer *eb;
- struct page *p;
- struct address_space *mapping = tree->mapping;
- int uptodate = 1;
- eb = __alloc_extent_buffer(mask);
- if (!eb || IS_ERR(eb))
- return NULL;
- eb->alloc_addr = (unsigned long)__builtin_return_address(0);
- eb->start = start;
- eb->len = len;
- atomic_set(&eb->refs, 1);
- for (i = 0; i < num_pages; i++, index++) {
- p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
- if (!p) {
- WARN_ON(1);
- /* make sure the free only frees the pages we've
- * grabbed a reference on
- */
- eb->len = i << PAGE_CACHE_SHIFT;
- eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
- goto fail;
- }
- set_page_extent_mapped(p);
- if (i == 0)
- eb->first_page = p;
- if (!PageUptodate(p))
- uptodate = 0;
- unlock_page(p);
- }
- if (uptodate)
- eb->flags |= EXTENT_UPTODATE;
- return eb;
- fail:
- free_extent_buffer(eb);
- return NULL;
- }
- EXPORT_SYMBOL(alloc_extent_buffer);
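
Typical lifecycle, sketched (blocknr and blocksize are illustrative): alloc_extent_buffer() pins one page-cache reference per covered page, and free_extent_buffer() drops them once the refcount hits zero.

struct extent_buffer *eb;

eb = alloc_extent_buffer(tree, blocknr, blocksize, GFP_NOFS);
if (!eb)
	return -ENOMEM;
/* ... read or modify the block ... */
free_extent_buffer(eb);
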
- struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
- u64 start, unsigned long len,
- gfp_t mask)
- {
- unsigned long num_pages = num_extent_pages(start, len);
- unsigned long i;
- unsigned long index = start >> PAGE_CACHE_SHIFT;
- struct extent_buffer *eb;
- struct page *p;
- struct address_space *mapping = tree->mapping;
- eb = __alloc_extent_buffer(mask);
- if (!eb || IS_ERR(eb))
- return NULL;
- eb->alloc_addr = (unsigned long)__builtin_return_address(0);
- eb->start = start;
- eb->len = len;
- atomic_set(&eb->refs, 1);
- for (i = 0; i < num_pages; i++, index++) {
- p = find_get_page(mapping, index);
- if (!p) {
- /* make sure the free only frees the pages we've
- * grabbed a reference on
- */
- eb->len = i << PAGE_CACHE_SHIFT;
- eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
- goto fail;
- }
- set_page_extent_mapped(p);
- if (i == 0)
- eb->first_page = p;
- }
- return eb;
- fail:
- free_extent_buffer(eb);
- return NULL;
- }
- EXPORT_SYMBOL(find_extent_buffer);
- void free_extent_buffer(struct extent_buffer *eb)
- {
- unsigned long i;
- unsigned long num_pages;
- if (!eb)
- return;
- if (!atomic_dec_and_test(&eb->refs))
- return;
- num_pages = num_extent_pages(eb->start, eb->len);
- /* drop the references on pages 1..n first; extent_buffer_page()
- * still needs a live first_page to look them up
- */
- for (i = 1; i < num_pages; i++) {
- page_cache_release(extent_buffer_page(eb, i));
- }
- if (eb->first_page)
- page_cache_release(eb->first_page);
- __free_extent_buffer(eb);
- }
- EXPORT_SYMBOL(free_extent_buffer);
- int clear_extent_buffer_dirty(struct extent_map_tree *tree,
- struct extent_buffer *eb)
- {
- int set;
- unsigned long i;
- unsigned long num_pages;
- struct page *page;
- u64 start = eb->start;
- u64 end = start + eb->len - 1;
- set = clear_extent_dirty(tree, start, end, GFP_NOFS);
- num_pages = num_extent_pages(eb->start, eb->len);
- for (i = 0; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
- lock_page(page);
- /*
- * if we're on the first or last page and the block isn't aligned
- * on a page boundary, do extra checks to make sure we don't clean
- * a page that is partially dirty
- */
- if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
- ((i == num_pages - 1) &&
- ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
- start = (u64)page->index << PAGE_CACHE_SHIFT;
- end = start + PAGE_CACHE_SIZE - 1;
- if (test_range_bit(tree, start, end,
- EXTENT_DIRTY, 0)) {
- unlock_page(page);
- continue;
- }
- }
- clear_page_dirty_for_io(page);
- unlock_page(page);
- }
- return 0;
- }
- EXPORT_SYMBOL(clear_extent_buffer_dirty);
- int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
- struct extent_buffer *eb)
- {
- return wait_on_extent_writeback(tree, eb->start,
- eb->start + eb->len - 1);
- }
- EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
- int set_extent_buffer_dirty(struct extent_map_tree *tree,
- struct extent_buffer *eb)
- {
- return set_range_dirty(tree, eb->start, eb->start + eb->len - 1);
- }
- EXPORT_SYMBOL(set_extent_buffer_dirty);
- int set_extent_buffer_uptodate(struct extent_map_tree *tree,
- struct extent_buffer *eb)
- {
- unsigned long i;
- struct page *page;
- unsigned long num_pages;
- num_pages = num_extent_pages(eb->start, eb->len);
- set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
- GFP_NOFS);
- for (i = 0; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
- if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
- ((i == num_pages - 1) &&
- ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
- check_page_uptodate(tree, page);
- continue;
- }
- SetPageUptodate(page);
- }
- return 0;
- }
- EXPORT_SYMBOL(set_extent_buffer_uptodate);
- int extent_buffer_uptodate(struct extent_map_tree *tree,
- struct extent_buffer *eb)
- {
- if (eb->flags & EXTENT_UPTODATE)
- return 1;
- return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
- EXTENT_UPTODATE, 1);
- }
- EXPORT_SYMBOL(extent_buffer_uptodate);
- int read_extent_buffer_pages(struct extent_map_tree *tree,
- struct extent_buffer *eb, int wait)
- {
- unsigned long i;
- struct page *page;
- int err;
- int ret = 0;
- unsigned long num_pages;
- if (eb->flags & EXTENT_UPTODATE)
- return 0;
- if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
- EXTENT_UPTODATE, 1)) {
- return 0;
- }
- num_pages = num_extent_pages(eb->start, eb->len);
- for (i = 0; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
- if (PageUptodate(page)) {
- continue;
- }
- if (!wait) {
- if (TestSetPageLocked(page)) {
- continue;
- }
- } else {
- lock_page(page);
- }
- if (!PageUptodate(page)) {
- err = page->mapping->a_ops->readpage(NULL, page);
- if (err) {
- ret = err;
- }
- } else {
- unlock_page(page);
- }
- }
- if (ret || !wait) {
- return ret;
- }
- for (i = 0; i < num_pages; i++) {
- page = extent_buffer_page(eb, i);
- wait_on_page_locked(page);
- if (!PageUptodate(page)) {
- ret = -EIO;
- }
- }
- if (!ret)
- eb->flags |= EXTENT_UPTODATE;
- return ret;
- }
- EXPORT_SYMBOL(read_extent_buffer_pages);
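
So a synchronous read of a metadata block would look roughly like this (error handling trimmed; names illustrative):

eb = alloc_extent_buffer(tree, blocknr, blocksize, GFP_NOFS);
if (!eb)
	return -ENOMEM;
ret = read_extent_buffer_pages(tree, eb, 1 /* wait */);
if (ret) {
	free_extent_buffer(eb);
	return ret;
}
/* every page is uptodate here; read_extent_buffer() et al are safe */
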
- void read_extent_buffer(struct extent_buffer *eb, void *dstv,
- unsigned long start,
- unsigned long len)
- {
- size_t cur;
- size_t offset;
- struct page *page;
- char *kaddr;
- char *dst = (char *)dstv;
- size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
- WARN_ON(start > eb->len);
- WARN_ON(start + len > eb->len);
- offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
- if (i == 0)
- offset += start_offset;
- while(len > 0) {
- page = extent_buffer_page(eb, i);
- WARN_ON(!PageUptodate(page));
- cur = min(len, (PAGE_CACHE_SIZE - offset));
- kaddr = kmap_atomic(page, KM_USER0);
- memcpy(dst, kaddr + offset, cur);
- kunmap_atomic(kaddr, KM_USER0);
- dst += cur;
- len -= cur;
- offset = 0;
- i++;
- }
- }
- EXPORT_SYMBOL(read_extent_buffer);
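
For example, pulling an 8-byte little-endian field out of the start of a buffer (the field layout is illustrative, not defined by this patch):

__le64 raw;
u64 csum;

read_extent_buffer(eb, &raw, 0, sizeof(raw));
csum = le64_to_cpu(raw);
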
- static int __map_extent_buffer(struct extent_buffer *eb, unsigned long start,
- unsigned long min_len, char **token, char **map,
- unsigned long *map_start,
- unsigned long *map_len, int km)
- {
- size_t offset = start & (PAGE_CACHE_SIZE - 1);
- char *kaddr;
- struct page *p;
- size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
- unsigned long end_i = (start_offset + start + min_len - 1) >>
- PAGE_CACHE_SHIFT;
- if (i != end_i)
- return -EINVAL;
- if (start >= eb->len) {
- printk("bad start in map eb start %Lu len %lu caller start %lu min %lu\n", eb->start, eb->len, start, min_len);
- WARN_ON(1);
- }
- if (i == 0) {
- offset = start_offset;
- *map_start = 0;
- } else {
- offset = 0;
- *map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
- }
- p = extent_buffer_page(eb, i);
- WARN_ON(!PageUptodate(p));
- kaddr = kmap_atomic(p, km);
- *token = kaddr;
- *map = kaddr + offset;
- *map_len = PAGE_CACHE_SIZE - offset;
- return 0;
- }
- int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
- unsigned long min_len,
- char **token, char **map,
- unsigned long *map_start,
- unsigned long *map_len, int km)
- {
- int err;
- int save = 0;
- if (eb->map_token) {
- if (start >= eb->map_start &&
- start + min_len <= eb->map_start + eb->map_len) {
- *token = eb->map_token;
- *map = eb->kaddr;
- *map_start = eb->map_start;
- *map_len = eb->map_len;
- return 0;
- }
- unmap_extent_buffer(eb, eb->map_token, km);
- eb->map_token = NULL;
- save = 1;
- }
- err = __map_extent_buffer(eb, start, min_len, token, map,
- map_start, map_len, km);
- if (!err && save) {
- eb->map_token = *token;
- eb->kaddr = *map;
- eb->map_start = *map_start;
- eb->map_len = *map_len;
- }
- return err;
- }
- EXPORT_SYMBOL(map_extent_buffer);
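
Callers use map_extent_buffer()/unmap_extent_buffer() as a bracket around short accesses. Note that *map points at the byte corresponding to *map_start, so a field at buffer offset off is read at map + (off - map_start). Sketch (off is illustrative):

char *token;
char *map;
unsigned long map_start;
unsigned long map_len;
int err;

err = map_extent_buffer(eb, off, sizeof(__le32), &token, &map,
			&map_start, &map_len, KM_USER0);
if (!err) {
	u32 val = le32_to_cpu(*(__le32 *)(map + off - map_start));
	unmap_extent_buffer(eb, token, KM_USER0);
}
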
- void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
- {
- kunmap_atomic(token, km);
- }
- EXPORT_SYMBOL(unmap_extent_buffer);
- int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
- unsigned long start,
- unsigned long len)
- {
- size_t cur;
- size_t offset;
- struct page *page;
- char *kaddr;
- char *ptr = (char *)ptrv;
- size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
- int ret = 0;
- WARN_ON(start > eb->len);
- WARN_ON(start + len > eb->len);
- offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
- if (i == 0)
- offset += start_offset;
- while(len > 0) {
- page = extent_buffer_page(eb, i);
- WARN_ON(!PageUptodate(page));
- cur = min(len, (PAGE_CACHE_SIZE - offset));
- kaddr = kmap_atomic(page, KM_USER0);
- ret = memcmp(ptr, kaddr + offset, cur);
- kunmap_atomic(kaddr, KM_USER0);
- if (ret)
- break;
- ptr += cur;
- len -= cur;
- offset = 0;
- i++;
- }
- return ret;
- }
- EXPORT_SYMBOL(memcmp_extent_buffer);
- void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
- unsigned long start, unsigned long len)
- {
- size_t cur;
- size_t offset;
- struct page *page;
- char *kaddr;
- char *src = (char *)srcv;
- size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
- WARN_ON(start > eb->len);
- WARN_ON(start + len > eb->len);
- offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
- if (i == 0)
- offset += start_offset;
- while(len > 0) {
- page = extent_buffer_page(eb, i);
- WARN_ON(!PageUptodate(page));
- cur = min(len, PAGE_CACHE_SIZE - offset);
- kaddr = kmap_atomic(page, KM_USER0);
- memcpy(kaddr + offset, src, cur);
- kunmap_atomic(kaddr, KM_USER0);
- src += cur;
- len -= cur;
- offset = 0;
- i++;
- }
- }
- EXPORT_SYMBOL(write_extent_buffer);
- void memset_extent_buffer(struct extent_buffer *eb, char c,
- unsigned long start, unsigned long len)
- {
- size_t cur;
- size_t offset;
- struct page *page;
- char *kaddr;
- size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
- WARN_ON(start > eb->len);
- WARN_ON(start + len > eb->len);
- offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
- if (i == 0)
- offset += start_offset;
- while(len > 0) {
- page = extent_buffer_page(eb, i);
- WARN_ON(!PageUptodate(page));
- cur = min(len, PAGE_CACHE_SIZE - offset);
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, c, cur);
- kunmap_atomic(kaddr, KM_USER0);
- len -= cur;
- offset = 0;
- i++;
- }
- }
- EXPORT_SYMBOL(memset_extent_buffer);
- void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
- unsigned long dst_offset, unsigned long src_offset,
- unsigned long len)
- {
- u64 dst_len = dst->len;
- size_t cur;
- size_t offset;
- struct page *page;
- char *kaddr;
- size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
- WARN_ON(src->len != dst_len);
- offset = dst_offset & ((unsigned long)PAGE_CACHE_SIZE - 1);
- if (i == 0)
- offset += start_offset;
- while(len > 0) {
- page = extent_buffer_page(dst, i);
- WARN_ON(!PageUptodate(page));
- cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
- kaddr = kmap_atomic(page, KM_USER1);
- read_extent_buffer(src, kaddr + offset, src_offset, cur);
- kunmap_atomic(kaddr, KM_USER1);
- src_offset += cur;
- len -= cur;
- offset = 0;
- i++;
- }
- }
- EXPORT_SYMBOL(copy_extent_buffer);
- static void move_pages(struct page *dst_page, struct page *src_page,
- unsigned long dst_off, unsigned long src_off,
- unsigned long len)
- {
- char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
- if (dst_page == src_page) {
- memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
- } else {
- /* dst sits above src in an overlapping move, so copy the
- * bytes back to front, memmove-style
- */
- char *src_kaddr = kmap_atomic(src_page, KM_USER1);
- char *p = dst_kaddr + dst_off + len;
- char *s = src_kaddr + src_off + len;
- while (len--)
- *--p = *--s;
- kunmap_atomic(src_kaddr, KM_USER1);
- }
- kunmap_atomic(dst_kaddr, KM_USER0);
- }
- static void copy_pages(struct page *dst_page, struct page *src_page,
- unsigned long dst_off, unsigned long src_off,
- unsigned long len)
- {
- char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
- char *src_kaddr;
- if (dst_page != src_page)
- src_kaddr = kmap_atomic(src_page, KM_USER1);
- else
- src_kaddr = dst_kaddr;
- memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
- kunmap_atomic(dst_kaddr, KM_USER0);
- if (dst_page != src_page)
- kunmap_atomic(src_kaddr, KM_USER1);
- }
- void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
- unsigned long src_offset, unsigned long len)
- {
- size_t cur;
- size_t dst_off_in_page;
- size_t src_off_in_page;
- size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long dst_i;
- unsigned long src_i;
- if (src_offset + len > dst->len) {
- printk("memmove bogus src_offset %lu move len %lu len %lu\n",
- src_offset, len, dst->len);
- BUG_ON(1);
- }
- if (dst_offset + len > dst->len) {
- printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
- dst_offset, len, dst->len);
- BUG_ON(1);
- }
- while(len > 0) {
- dst_off_in_page = dst_offset &
- ((unsigned long)PAGE_CACHE_SIZE - 1);
- src_off_in_page = src_offset &
- ((unsigned long)PAGE_CACHE_SIZE - 1);
- dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
- src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
- if (src_i == 0)
- src_off_in_page += start_offset;
- if (dst_i == 0)
- dst_off_in_page += start_offset;
- cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
- src_off_in_page));
- cur = min(cur, (unsigned long)(PAGE_CACHE_SIZE -
- dst_off_in_page));
- copy_pages(extent_buffer_page(dst, dst_i),
- extent_buffer_page(dst, src_i),
- dst_off_in_page, src_off_in_page, cur);
- src_offset += cur;
- dst_offset += cur;
- len -= cur;
- }
- }
- EXPORT_SYMBOL(memcpy_extent_buffer);
- void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
- unsigned long src_offset, unsigned long len)
- {
- size_t cur;
- size_t dst_off_in_page;
- size_t src_off_in_page;
- unsigned long dst_end = dst_offset + len - 1;
- unsigned long src_end = src_offset + len - 1;
- size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
- unsigned long dst_i;
- unsigned long src_i;
- if (src_offset + len > dst->len) {
- printk("memmove bogus src_offset %lu move len %lu len %lu\n",
- src_offset, len, dst->len);
- BUG_ON(1);
- }
- if (dst_offset + len > dst->len) {
- printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
- dst_offset, len, dst->len);
- BUG_ON(1);
- }
- if (dst_offset < src_offset) {
- memcpy_extent_buffer(dst, dst_offset, src_offset, len);
- return;
- }
- while(len > 0) {
- dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
- src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
- dst_off_in_page = dst_end &
- ((unsigned long)PAGE_CACHE_SIZE - 1);
- src_off_in_page = src_end &
- ((unsigned long)PAGE_CACHE_SIZE - 1);
- if (src_i == 0)
- src_off_in_page += start_offset;
- if (dst_i == 0)
- dst_off_in_page += start_offset;
- cur = min(len, src_off_in_page + 1);
- cur = min(cur, dst_off_in_page + 1);
- move_pages(extent_buffer_page(dst, dst_i),
- extent_buffer_page(dst, src_i),
- dst_off_in_page - cur + 1,
- src_off_in_page - cur + 1, cur);
- dst_end -= cur;
- src_end -= cur;
- len -= cur;
- }
- }
- EXPORT_SYMBOL(memmove_extent_buffer);
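
The usual consumer is an in-place insert or delete inside a tree block: shift the tail of the block, then fill the gap. Sketch (offsets illustrative; overlapping ranges are handled like memmove()):

/* open a 'size'-byte gap at 'start' within eb */
memmove_extent_buffer(eb, start + size, start, tail_len);
memset_extent_buffer(eb, 0, start, size);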