extent_io.c

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	int extent_locked;
};

int __init extent_io_init(void)
{
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer), 0,
						 NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       (unsigned long long)state->start,
		       (unsigned long long)state->end,
		       state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n", (unsigned long long)eb->start,
		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping, gfp_t mask)
{
	tree->state.rb_node = NULL;
	tree->buffer.rb_node = NULL;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}
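
/*
 * Illustrative sketch (not part of the original file): how a filesystem
 * might embed and initialize one io tree per inode.  The
 * example_inode_info structure is hypothetical; btrfs keeps its trees
 * in struct btrfs_inode and calls this from its inode setup path.
 */
#if 0
struct example_inode_info {
	struct inode vfs_inode;
	struct extent_io_tree io_tree;
};

static void example_setup_io_tree(struct example_inode_info *ei)
{
	/* one tree per inode, indexed by file offset in bytes */
	extent_io_tree_init(&ei->io_tree, ei->vfs_inode.i_mapping, GFP_NOFS);
}
#endif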

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#if LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#if LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}

static void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#if LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		kmem_cache_free(extent_state_cache, state);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}

static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
						u64 offset, struct rb_node *node)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_buffer *eb;

	while (*p) {
		parent = *p;
		eb = rb_entry(parent, struct extent_buffer, rb_node);

		if (offset < eb->start)
			p = &(*p)->rb_left;
		else if (offset > eb->start)
			p = &(*p)->rb_right;
		else
			return eb;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
					   u64 offset)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node *n = root->rb_node;
	struct extent_buffer *eb;

	while (n) {
		eb = rb_entry(n, struct extent_buffer, rb_node);
		if (offset < eb->start)
			n = n->rb_left;
		else if (offset > eb->start)
			n = n->rb_right;
		else
			return eb;
	}
	return NULL;
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->tree = NULL;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state,
			 unsigned long bits)
{
	if (tree->ops && tree->ops->set_bit_hook) {
		tree->ops->set_bit_hook(tree->mapping->host, state->start,
					state->end, state->state, bits);
	}
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned long bits)
{
	if (tree->ops && tree->ops->clear_bit_hook) {
		tree->ops->clear_bit_hook(tree->mapping->host, state->start,
					  state->end, state->state, bits);
	}
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk(KERN_ERR "btrfs end < start %llu %llu\n",
		       (unsigned long long)end,
		       (unsigned long long)start);
		WARN_ON(1);
	}
	if (bits & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	set_state_cb(tree, state, bits);
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n", (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)start, (unsigned long long)end);
		free_extent_state(state);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->tree) {
			clear_state_cb(tree, state, state->state);
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
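
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): dropping the dirty and delalloc bits over an inclusive byte
 * range once the data has been written out.  GFP_NOFS keeps the
 * split/merge allocations from recursing back into the filesystem.
 */
#if 0
static void example_clear_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	clear_extent_bit(tree, start, end, EXTENT_DIRTY | EXTENT_DELALLOC,
			 0, 0, GFP_NOFS);
}
#endif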

static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			spin_unlock(&tree->lock);
			cond_resched();
			spin_lock(&tree->lock);
		}
	}
out:
	spin_unlock(&tree->lock);
	return 0;
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int bits)
{
	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	set_state_cb(tree, state, bits);
	state->state |= bits;
}

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			  int bits, int exclusive, u64 *failed_start,
			  gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		set_state_bits(tree, state, bits);
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, bits);
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, bits);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
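
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): using the exclusive mode of set_extent_bit.  If any part of the
 * range already has EXTENT_LOCKED set, -EEXIST is returned and
 * failed_start tells the caller where the conflict begins, which is how
 * lock_extent() below decides where to wait and retry.
 */
#if 0
static int example_try_lock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	u64 failed_start;
	int err;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
			     &failed_start, GFP_NOFS);
	if (err == -EEXIST)
		printk(KERN_INFO "range already locked at %llu\n",
		       (unsigned long long)failed_start);
	return err;
}
#endif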

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}

int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY,
			      0, NULL, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}

int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}

static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
			    gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}

static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
				 u64 end, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}

static int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
				gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}

static int clear_extent_writeback(struct extent_io_tree *tree, u64 start,
				  u64 end, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}

/*
 * either insert or lock state struct between start and end.  Use mask to
 * tell us if waiting is desired.
 */
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    gfp_t mask)
{
	int err;
	u64 failed_start;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
			     &failed_start, mask);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, mask);
		return 0;
	}
	return 1;
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
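
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): the usual pattern around an I/O operation is lock_extent /
 * do the work / unlock_extent on the same inclusive [start, end] range.
 */
#if 0
static void example_locked_region(struct extent_io_tree *tree, u64 start, u64 end)
{
	lock_extent(tree, start, end, GFP_NOFS);
	/* ... read or write file data covering [start, end] ... */
	unlock_extent(tree, start, end, GFP_NOFS);
}
#endif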

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}

/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}

/*
 * find the first offset in the io tree with 'bits' set.  zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}
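
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): walking every dirty range in a tree by repeatedly asking for
 * the first extent with EXTENT_DIRTY set after the previous hit.
 */
#if 0
static void example_walk_dirty(struct extent_io_tree *tree)
{
	u64 start = 0;
	u64 found_start;
	u64 found_end;

	while (!find_first_extent_bit(tree, start, &found_start, &found_end,
				      EXTENT_DIRTY)) {
		/* ... process the dirty range [found_start, found_end] ... */
		start = found_end + 1;
	}
}
#endif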

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
					u64 *start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found)
			*start = state->start;
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}

static noinline int __unlock_for_delalloc(struct inode *inode,
					  struct page *locked_page,
					  u64 start, u64 end)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	if (index == locked_page->index && end_index == index)
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long, nr_pages,
						  ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] != locked_page)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}

static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 delalloc_start,
					u64 delalloc_end)
{
	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
	unsigned long start_index = index;
	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
	unsigned long pages_locked = 0;
	struct page *pages[16];
	unsigned long nrpages;
	int ret;
	int i;

	/* the caller is responsible for locking the start index */
	if (index == locked_page->index && index == end_index)
		return 0;

	/* skip the page at the start index */
	nrpages = end_index - index + 1;
	while (nrpages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
						  nrpages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			ret = -EAGAIN;
			goto done;
		}
		/* now we have an array of pages, lock them all */
		for (i = 0; i < ret; i++) {
			/*
			 * the caller is taking responsibility for
			 * locked_page
			 */
			if (pages[i] != locked_page) {
				lock_page(pages[i]);
				if (!PageDirty(pages[i]) ||
				    pages[i]->mapping != inode->i_mapping) {
					ret = -EAGAIN;
					unlock_page(pages[i]);
					page_cache_release(pages[i]);
					goto done;
				}
			}
			page_cache_release(pages[i]);
			pages_locked++;
		}
		nrpages -= ret;
		index += ret;
		cond_resched();
	}
	ret = 0;
done:
	if (ret && pages_locked) {
		__unlock_for_delalloc(inode, locked_page,
				      delalloc_start,
				      ((u64)(start_index + pages_locked - 1)) <<
				      PAGE_CACHE_SHIFT);
	}
	return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
					     struct extent_io_tree *tree,
					     struct page *locked_page,
					     u64 *start, u64 *end,
					     u64 max_bytes)
{
	u64 delalloc_start;
	u64 delalloc_end;
	u64 found;
	int ret;
	int loops = 0;

again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
				    max_bytes);
	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
		return found;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 * if we're looping.
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, lets avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		if (!loops) {
			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
			max_bytes = PAGE_CACHE_SIZE - offset;
			loops = 1;
			goto again;
		} else {
			found = 0;
			goto out_failed;
		}
	}
	BUG_ON(ret);

	/* step three, lock the state bits for the whole range */
	lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, 1);
	if (!ret) {
		unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
		__unlock_for_delalloc(inode, locked_page,
				      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}
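
/*
 * Illustrative sketch (hypothetical writepage helper, not part of the
 * original file): how a writepage path might consume
 * find_lock_delalloc_range.  The commented step stands in for whatever
 * allocates extents and submits the real I/O; the 128MB cap is an
 * arbitrary example value.
 */
#if 0
static void example_handle_delalloc(struct inode *inode,
				    struct extent_io_tree *tree,
				    struct page *locked_page, u64 start)
{
	u64 delalloc_start = start;
	u64 delalloc_end = 0;
	u64 found;

	found = find_lock_delalloc_range(inode, tree, locked_page,
					 &delalloc_start, &delalloc_end,
					 128 * 1024 * 1024);
	if (!found)
		return;
	/* ... allocate extents and start I/O for the locked range ... */
	unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
}
#endif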

int extent_clear_unlock_delalloc(struct inode *inode,
				 struct extent_io_tree *tree,
				 u64 start, u64 end, struct page *locked_page,
				 int unlock_pages,
				 int clear_unlock,
				 int clear_delalloc, int clear_dirty,
				 int set_writeback,
				 int end_writeback)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int clear_bits = 0;

	if (clear_unlock)
		clear_bits |= EXTENT_LOCKED;

	if (clear_dirty)
		clear_bits |= EXTENT_DIRTY;

	if (clear_delalloc)
		clear_bits |= EXTENT_DELALLOC;

	clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
	if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
						  nr_pages, ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] == locked_page) {
				page_cache_release(pages[i]);
				continue;
			}
			if (clear_dirty)
				clear_page_dirty_for_io(pages[i]);
			if (set_writeback)
				set_page_writeback(pages[i]);
			if (end_writeback)
				end_page_writeback(pages[i]);
			if (unlock_pages)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}

/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	int found = 0;

	if (search_end <= cur_start) {
		WARN_ON(1);
		return 0;
	}

	spin_lock(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (state->end >= cur_start && (state->state & bits)) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = state->start;
				found = 1;
			}
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return total_bytes;
}
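
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): counting dirty bytes from the start of the file.  Passing
 * start == 0 and bits == EXTENT_DIRTY hits the cached tree->dirty_bytes
 * fast path above instead of walking the whole tree.
 */
#if 0
static u64 example_dirty_bytes(struct extent_io_tree *tree)
{
	u64 start = 0;

	return count_range_bits(tree, &start, (u64)-1, (u64)-1, EXTENT_DIRTY);
}
#endif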
  1278. #if 0
  1279. /*
  1280. * helper function to lock both pages and extents in the tree.
  1281. * pages must be locked first.
  1282. */
  1283. static int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
  1284. {
  1285. unsigned long index = start >> PAGE_CACHE_SHIFT;
  1286. unsigned long end_index = end >> PAGE_CACHE_SHIFT;
  1287. struct page *page;
  1288. int err;
  1289. while (index <= end_index) {
  1290. page = grab_cache_page(tree->mapping, index);
  1291. if (!page) {
  1292. err = -ENOMEM;
  1293. goto failed;
  1294. }
  1295. if (IS_ERR(page)) {
  1296. err = PTR_ERR(page);
  1297. goto failed;
  1298. }
  1299. index++;
  1300. }
  1301. lock_extent(tree, start, end, GFP_NOFS);
  1302. return 0;
  1303. failed:
  1304. /*
  1305. * we failed above in getting the page at 'index', so we undo here
  1306. * up to but not including the page at 'index'
  1307. */
  1308. end_index = index;
  1309. index = start >> PAGE_CACHE_SHIFT;
  1310. while (index < end_index) {
  1311. page = find_get_page(tree->mapping, index);
  1312. unlock_page(page);
  1313. page_cache_release(page);
  1314. index++;
  1315. }
  1316. return err;
  1317. }
  1318. /*
  1319. * helper function to unlock both pages and extents in the tree.
  1320. */
  1321. static int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
  1322. {
  1323. unsigned long index = start >> PAGE_CACHE_SHIFT;
  1324. unsigned long end_index = end >> PAGE_CACHE_SHIFT;
  1325. struct page *page;
  1326. while (index <= end_index) {
  1327. page = find_get_page(tree->mapping, index);
  1328. unlock_page(page);
  1329. page_cache_release(page);
  1330. index++;
  1331. }
  1332. unlock_extent(tree, start, end, GFP_NOFS);
  1333. return 0;
  1334. }
  1335. #endif
  1336. /*
  1337. * set the private field for a given byte offset in the tree. If there isn't
  1338. * an extent_state there already, this does nothing.
  1339. */
  1340. int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
  1341. {
  1342. struct rb_node *node;
  1343. struct extent_state *state;
  1344. int ret = 0;
  1345. spin_lock(&tree->lock);
  1346. /*
  1347. * this search will find all the extents that end after
  1348. * our range starts.
  1349. */
  1350. node = tree_search(tree, start);
  1351. if (!node) {
  1352. ret = -ENOENT;
  1353. goto out;
  1354. }
  1355. state = rb_entry(node, struct extent_state, rb_node);
  1356. if (state->start != start) {
  1357. ret = -ENOENT;
  1358. goto out;
  1359. }
  1360. state->private = private;
  1361. out:
  1362. spin_unlock(&tree->lock);
  1363. return ret;
  1364. }
  1365. int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
  1366. {
  1367. struct rb_node *node;
  1368. struct extent_state *state;
  1369. int ret = 0;
  1370. spin_lock(&tree->lock);
  1371. /*
  1372. * this search will find all the extents that end after
  1373. * our range starts.
  1374. */
  1375. node = tree_search(tree, start);
  1376. if (!node) {
  1377. ret = -ENOENT;
  1378. goto out;
  1379. }
  1380. state = rb_entry(node, struct extent_state, rb_node);
  1381. if (state->start != start) {
  1382. ret = -ENOENT;
  1383. goto out;
  1384. }
  1385. *private = state->private;
  1386. out:
  1387. spin_unlock(&tree->lock);
  1388. return ret;
  1389. }
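/*
* together, set_state_private/get_state_private act as a small per-extent
* key/value store keyed by the exact start offset of an extent_state; for
* illustration, a caller could stash a 64-bit cookie with
* set_state_private(tree, start, cookie) and read it back later with
* get_state_private(tree, start, &cookie), where 'cookie' is a hypothetical
* caller variable. Both return -ENOENT if no extent_state starts exactly
* at 'start'.
*/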
  1390. /*
  1391. * searches a range in the state tree for a given mask.
1392. * If 'filled' == 1, this returns 1 only if the whole range is covered by
1393. * extents that all have the bits set. Otherwise, 1 is returned if any bit
1394. * in the range is found set.
  1395. */
  1396. int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
  1397. int bits, int filled)
  1398. {
  1399. struct extent_state *state = NULL;
  1400. struct rb_node *node;
  1401. int bitset = 0;
  1402. spin_lock(&tree->lock);
  1403. node = tree_search(tree, start);
  1404. while (node && start <= end) {
  1405. state = rb_entry(node, struct extent_state, rb_node);
  1406. if (filled && state->start > start) {
  1407. bitset = 0;
  1408. break;
  1409. }
  1410. if (state->start > end)
  1411. break;
  1412. if (state->state & bits) {
  1413. bitset = 1;
  1414. if (!filled)
  1415. break;
  1416. } else if (filled) {
  1417. bitset = 0;
  1418. break;
  1419. }
  1420. start = state->end + 1;
  1421. if (start > end)
  1422. break;
  1423. node = rb_next(node);
  1424. if (!node) {
  1425. if (filled)
  1426. bitset = 0;
  1427. break;
  1428. }
  1429. }
  1430. spin_unlock(&tree->lock);
  1431. return bitset;
  1432. }
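/*
* for example, with an extent covering [0, 4095] that has EXTENT_DIRTY set
* and [4096, 8191] clean, test_range_bit(tree, 0, 8191, EXTENT_DIRTY, 0)
* returns 1 (some byte in the range is dirty) while
* test_range_bit(tree, 0, 8191, EXTENT_DIRTY, 1) returns 0 because the
* whole range is not covered by dirty extents
*/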
  1433. /*
  1434. * helper function to set a given page up to date if all the
  1435. * extents in the tree for that page are up to date
  1436. */
  1437. static int check_page_uptodate(struct extent_io_tree *tree,
  1438. struct page *page)
  1439. {
  1440. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  1441. u64 end = start + PAGE_CACHE_SIZE - 1;
  1442. if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
  1443. SetPageUptodate(page);
  1444. return 0;
  1445. }
  1446. /*
  1447. * helper function to unlock a page if all the extents in the tree
  1448. * for that page are unlocked
  1449. */
  1450. static int check_page_locked(struct extent_io_tree *tree,
  1451. struct page *page)
  1452. {
  1453. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  1454. u64 end = start + PAGE_CACHE_SIZE - 1;
  1455. if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
  1456. unlock_page(page);
  1457. return 0;
  1458. }
  1459. /*
  1460. * helper function to end page writeback if all the extents
  1461. * in the tree for that page are done with writeback
  1462. */
  1463. static int check_page_writeback(struct extent_io_tree *tree,
  1464. struct page *page)
  1465. {
  1466. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  1467. u64 end = start + PAGE_CACHE_SIZE - 1;
  1468. if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
  1469. end_page_writeback(page);
  1470. return 0;
  1471. }
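/*
* the three check_page_* helpers above exist because one page can be covered
* by several extent_state records; the end_io handlers below only flip the
* page level flag (uptodate, locked or writeback) once every byte of the
* page has reached that state in the tree, otherwise partial completions of
* sub-page extents would release the page too early
*/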
  1472. /* lots and lots of room for performance fixes in the end_bio funcs */
  1473. /*
  1474. * after a writepage IO is done, we need to:
  1475. * clear the uptodate bits on error
  1476. * clear the writeback bits in the extent tree for this IO
  1477. * end_page_writeback if the page has no more pending IO
  1478. *
  1479. * Scheduling is not allowed, so the extent state tree is expected
  1480. * to have one and only one object corresponding to this IO.
  1481. */
  1482. static void end_bio_extent_writepage(struct bio *bio, int err)
  1483. {
  1484. int uptodate = err == 0;
  1485. struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
  1486. struct extent_io_tree *tree;
  1487. u64 start;
  1488. u64 end;
  1489. int whole_page;
  1490. int ret;
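/*
* walk the bio_vec array backwards, from the last segment added down to
* the first, prefetching the next page's flags as we go; each segment is
* translated back into the [start, end] byte range it covers in the
* owning inode's io_tree
*/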
  1491. do {
  1492. struct page *page = bvec->bv_page;
  1493. tree = &BTRFS_I(page->mapping->host)->io_tree;
  1494. start = ((u64)page->index << PAGE_CACHE_SHIFT) +
  1495. bvec->bv_offset;
  1496. end = start + bvec->bv_len - 1;
  1497. if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
  1498. whole_page = 1;
  1499. else
  1500. whole_page = 0;
  1501. if (--bvec >= bio->bi_io_vec)
  1502. prefetchw(&bvec->bv_page->flags);
  1503. if (tree->ops && tree->ops->writepage_end_io_hook) {
  1504. ret = tree->ops->writepage_end_io_hook(page, start,
  1505. end, NULL, uptodate);
  1506. if (ret)
  1507. uptodate = 0;
  1508. }
  1509. if (!uptodate && tree->ops &&
  1510. tree->ops->writepage_io_failed_hook) {
  1511. ret = tree->ops->writepage_io_failed_hook(bio, page,
  1512. start, end, NULL);
  1513. if (ret == 0) {
  1514. uptodate = (err == 0);
  1515. continue;
  1516. }
  1517. }
  1518. if (!uptodate) {
  1519. clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
  1520. ClearPageUptodate(page);
  1521. SetPageError(page);
  1522. }
  1523. clear_extent_writeback(tree, start, end, GFP_ATOMIC);
  1524. if (whole_page)
  1525. end_page_writeback(page);
  1526. else
  1527. check_page_writeback(tree, page);
  1528. } while (bvec >= bio->bi_io_vec);
  1529. bio_put(bio);
  1530. }
  1531. /*
  1532. * after a readpage IO is done, we need to:
  1533. * clear the uptodate bits on error
  1534. * set the uptodate bits if things worked
  1535. * set the page up to date if all extents in the tree are uptodate
  1536. * clear the lock bit in the extent tree
  1537. * unlock the page if there are no other extents locked for it
  1538. *
  1539. * Scheduling is not allowed, so the extent state tree is expected
  1540. * to have one and only one object corresponding to this IO.
  1541. */
  1542. static void end_bio_extent_readpage(struct bio *bio, int err)
  1543. {
  1544. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  1545. struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
  1546. struct extent_io_tree *tree;
  1547. u64 start;
  1548. u64 end;
  1549. int whole_page;
  1550. int ret;
  1551. if (err)
  1552. uptodate = 0;
  1553. do {
  1554. struct page *page = bvec->bv_page;
  1555. tree = &BTRFS_I(page->mapping->host)->io_tree;
  1556. start = ((u64)page->index << PAGE_CACHE_SHIFT) +
  1557. bvec->bv_offset;
  1558. end = start + bvec->bv_len - 1;
  1559. if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
  1560. whole_page = 1;
  1561. else
  1562. whole_page = 0;
  1563. if (--bvec >= bio->bi_io_vec)
  1564. prefetchw(&bvec->bv_page->flags);
  1565. if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
  1566. ret = tree->ops->readpage_end_io_hook(page, start, end,
  1567. NULL);
  1568. if (ret)
  1569. uptodate = 0;
  1570. }
  1571. if (!uptodate && tree->ops &&
  1572. tree->ops->readpage_io_failed_hook) {
  1573. ret = tree->ops->readpage_io_failed_hook(bio, page,
  1574. start, end, NULL);
  1575. if (ret == 0) {
  1576. uptodate =
  1577. test_bit(BIO_UPTODATE, &bio->bi_flags);
  1578. if (err)
  1579. uptodate = 0;
  1580. continue;
  1581. }
  1582. }
  1583. if (uptodate) {
  1584. set_extent_uptodate(tree, start, end,
  1585. GFP_ATOMIC);
  1586. }
  1587. unlock_extent(tree, start, end, GFP_ATOMIC);
  1588. if (whole_page) {
  1589. if (uptodate) {
  1590. SetPageUptodate(page);
  1591. } else {
  1592. ClearPageUptodate(page);
  1593. SetPageError(page);
  1594. }
  1595. unlock_page(page);
  1596. } else {
  1597. if (uptodate) {
  1598. check_page_uptodate(tree, page);
  1599. } else {
  1600. ClearPageUptodate(page);
  1601. SetPageError(page);
  1602. }
  1603. check_page_locked(tree, page);
  1604. }
  1605. } while (bvec >= bio->bi_io_vec);
  1606. bio_put(bio);
  1607. }
  1608. /*
  1609. * IO done from prepare_write is pretty simple, we just unlock
  1610. * the structs in the extent tree when done, and set the uptodate bits
  1611. * as appropriate.
  1612. */
  1613. static void end_bio_extent_preparewrite(struct bio *bio, int err)
  1614. {
  1615. const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  1616. struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
  1617. struct extent_io_tree *tree;
  1618. u64 start;
  1619. u64 end;
  1620. do {
  1621. struct page *page = bvec->bv_page;
  1622. tree = &BTRFS_I(page->mapping->host)->io_tree;
  1623. start = ((u64)page->index << PAGE_CACHE_SHIFT) +
  1624. bvec->bv_offset;
  1625. end = start + bvec->bv_len - 1;
  1626. if (--bvec >= bio->bi_io_vec)
  1627. prefetchw(&bvec->bv_page->flags);
  1628. if (uptodate) {
  1629. set_extent_uptodate(tree, start, end, GFP_ATOMIC);
  1630. } else {
  1631. ClearPageUptodate(page);
  1632. SetPageError(page);
  1633. }
  1634. unlock_extent(tree, start, end, GFP_ATOMIC);
  1635. } while (bvec >= bio->bi_io_vec);
  1636. bio_put(bio);
  1637. }
  1638. static struct bio *
  1639. extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
  1640. gfp_t gfp_flags)
  1641. {
  1642. struct bio *bio;
  1643. bio = bio_alloc(gfp_flags, nr_vecs);
  1644. if (bio == NULL && (current->flags & PF_MEMALLOC)) {
  1645. while (!bio && (nr_vecs /= 2))
  1646. bio = bio_alloc(gfp_flags, nr_vecs);
  1647. }
  1648. if (bio) {
  1649. bio->bi_size = 0;
  1650. bio->bi_bdev = bdev;
  1651. bio->bi_sector = first_sector;
  1652. }
  1653. return bio;
  1654. }
  1655. static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
  1656. unsigned long bio_flags)
  1657. {
  1658. int ret = 0;
  1659. struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
  1660. struct page *page = bvec->bv_page;
  1661. struct extent_io_tree *tree = bio->bi_private;
  1662. u64 start;
  1663. u64 end;
  1664. start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
  1665. end = start + bvec->bv_len - 1;
  1666. bio->bi_private = NULL;
  1667. bio_get(bio);
  1668. if (tree->ops && tree->ops->submit_bio_hook)
  1669. tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
  1670. mirror_num, bio_flags);
  1671. else
  1672. submit_bio(rw, bio);
  1673. if (bio_flagged(bio, BIO_EOPNOTSUPP))
  1674. ret = -EOPNOTSUPP;
  1675. bio_put(bio);
  1676. return ret;
  1677. }
  1678. static int submit_extent_page(int rw, struct extent_io_tree *tree,
  1679. struct page *page, sector_t sector,
  1680. size_t size, unsigned long offset,
  1681. struct block_device *bdev,
  1682. struct bio **bio_ret,
  1683. unsigned long max_pages,
  1684. bio_end_io_t end_io_func,
  1685. int mirror_num,
  1686. unsigned long prev_bio_flags,
  1687. unsigned long bio_flags)
  1688. {
  1689. int ret = 0;
  1690. struct bio *bio;
  1691. int nr;
  1692. int contig = 0;
  1693. int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
  1694. int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
  1695. size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
  1696. if (bio_ret && *bio_ret) {
  1697. bio = *bio_ret;
  1698. if (old_compressed)
  1699. contig = bio->bi_sector == sector;
  1700. else
  1701. contig = bio->bi_sector + (bio->bi_size >> 9) ==
  1702. sector;
  1703. if (prev_bio_flags != bio_flags || !contig ||
  1704. (tree->ops && tree->ops->merge_bio_hook &&
  1705. tree->ops->merge_bio_hook(page, offset, page_size, bio,
  1706. bio_flags)) ||
  1707. bio_add_page(bio, page, page_size, offset) < page_size) {
  1708. ret = submit_one_bio(rw, bio, mirror_num,
  1709. prev_bio_flags);
  1710. bio = NULL;
  1711. } else {
  1712. return 0;
  1713. }
  1714. }
  1715. if (this_compressed)
  1716. nr = BIO_MAX_PAGES;
  1717. else
  1718. nr = bio_get_nr_vecs(bdev);
  1719. bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
  1720. bio_add_page(bio, page, page_size, offset);
  1721. bio->bi_end_io = end_io_func;
  1722. bio->bi_private = tree;
  1723. if (bio_ret)
  1724. *bio_ret = bio;
  1725. else
  1726. ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
  1727. return ret;
  1728. }
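/*
* to summarize the merging rule above: the page is appended to the bio in
* *bio_ret only when the bio flags match, the page is contiguous with the
* bio (same starting sector for compressed bios, directly following sector
* otherwise), the optional merge_bio_hook does not object, and
* bio_add_page() accepts the full page_size; in every other case the
* pending bio is submitted first and a fresh bio is started for this page
*/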
  1729. void set_page_extent_mapped(struct page *page)
  1730. {
  1731. if (!PagePrivate(page)) {
  1732. SetPagePrivate(page);
  1733. page_cache_get(page);
  1734. set_page_private(page, EXTENT_PAGE_PRIVATE);
  1735. }
  1736. }
  1737. static void set_page_extent_head(struct page *page, unsigned long len)
  1738. {
  1739. set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
  1740. }
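/*
* page->private is used here as a tag, not a pointer: EXTENT_PAGE_PRIVATE
* marks a page that belongs to an extent mapped file, and
* EXTENT_PAGE_PRIVATE_FIRST_PAGE | (len << 2) marks the first page of an
* extent_buffer while encoding the buffer length in the higher bits
*/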
  1741. /*
1742. * basic readpage implementation. Locked extent state structs are inserted
1743. * into the tree and are removed when the IO is done (by the end_io
1744. * handlers)
  1745. */
  1746. static int __extent_read_full_page(struct extent_io_tree *tree,
  1747. struct page *page,
  1748. get_extent_t *get_extent,
  1749. struct bio **bio, int mirror_num,
  1750. unsigned long *bio_flags)
  1751. {
  1752. struct inode *inode = page->mapping->host;
  1753. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  1754. u64 page_end = start + PAGE_CACHE_SIZE - 1;
  1755. u64 end;
  1756. u64 cur = start;
  1757. u64 extent_offset;
  1758. u64 last_byte = i_size_read(inode);
  1759. u64 block_start;
  1760. u64 cur_end;
  1761. sector_t sector;
  1762. struct extent_map *em;
  1763. struct block_device *bdev;
  1764. int ret;
  1765. int nr = 0;
  1766. size_t page_offset = 0;
  1767. size_t iosize;
  1768. size_t disk_io_size;
  1769. size_t blocksize = inode->i_sb->s_blocksize;
  1770. unsigned long this_bio_flag = 0;
  1771. set_page_extent_mapped(page);
  1772. end = page_end;
  1773. lock_extent(tree, start, end, GFP_NOFS);
  1774. if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
  1775. char *userpage;
  1776. size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
  1777. if (zero_offset) {
  1778. iosize = PAGE_CACHE_SIZE - zero_offset;
  1779. userpage = kmap_atomic(page, KM_USER0);
  1780. memset(userpage + zero_offset, 0, iosize);
  1781. flush_dcache_page(page);
  1782. kunmap_atomic(userpage, KM_USER0);
  1783. }
  1784. }
  1785. while (cur <= end) {
  1786. if (cur >= last_byte) {
  1787. char *userpage;
  1788. iosize = PAGE_CACHE_SIZE - page_offset;
  1789. userpage = kmap_atomic(page, KM_USER0);
  1790. memset(userpage + page_offset, 0, iosize);
  1791. flush_dcache_page(page);
  1792. kunmap_atomic(userpage, KM_USER0);
  1793. set_extent_uptodate(tree, cur, cur + iosize - 1,
  1794. GFP_NOFS);
  1795. unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
  1796. break;
  1797. }
  1798. em = get_extent(inode, page, page_offset, cur,
  1799. end - cur + 1, 0);
  1800. if (IS_ERR(em) || !em) {
  1801. SetPageError(page);
  1802. unlock_extent(tree, cur, end, GFP_NOFS);
  1803. break;
  1804. }
  1805. extent_offset = cur - em->start;
  1806. BUG_ON(extent_map_end(em) <= cur);
  1807. BUG_ON(end < cur);
  1808. if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
  1809. this_bio_flag = EXTENT_BIO_COMPRESSED;
  1810. iosize = min(extent_map_end(em) - cur, end - cur + 1);
  1811. cur_end = min(extent_map_end(em) - 1, end);
  1812. iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
  1813. if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
  1814. disk_io_size = em->block_len;
  1815. sector = em->block_start >> 9;
  1816. } else {
  1817. sector = (em->block_start + extent_offset) >> 9;
  1818. disk_io_size = iosize;
  1819. }
  1820. bdev = em->bdev;
  1821. block_start = em->block_start;
  1822. if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
  1823. block_start = EXTENT_MAP_HOLE;
  1824. free_extent_map(em);
  1825. em = NULL;
  1826. /* we've found a hole, just zero and go on */
  1827. if (block_start == EXTENT_MAP_HOLE) {
  1828. char *userpage;
  1829. userpage = kmap_atomic(page, KM_USER0);
  1830. memset(userpage + page_offset, 0, iosize);
  1831. flush_dcache_page(page);
  1832. kunmap_atomic(userpage, KM_USER0);
  1833. set_extent_uptodate(tree, cur, cur + iosize - 1,
  1834. GFP_NOFS);
  1835. unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
  1836. cur = cur + iosize;
  1837. page_offset += iosize;
  1838. continue;
  1839. }
  1840. /* the get_extent function already copied into the page */
  1841. if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
  1842. check_page_uptodate(tree, page);
  1843. unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
  1844. cur = cur + iosize;
  1845. page_offset += iosize;
  1846. continue;
  1847. }
  1848. /* we have an inline extent but it didn't get marked up
  1849. * to date. Error out
  1850. */
  1851. if (block_start == EXTENT_MAP_INLINE) {
  1852. SetPageError(page);
  1853. unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
  1854. cur = cur + iosize;
  1855. page_offset += iosize;
  1856. continue;
  1857. }
  1858. ret = 0;
  1859. if (tree->ops && tree->ops->readpage_io_hook) {
  1860. ret = tree->ops->readpage_io_hook(page, cur,
  1861. cur + iosize - 1);
  1862. }
  1863. if (!ret) {
  1864. unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
  1865. pnr -= page->index;
  1866. ret = submit_extent_page(READ, tree, page,
  1867. sector, disk_io_size, page_offset,
  1868. bdev, bio, pnr,
  1869. end_bio_extent_readpage, mirror_num,
  1870. *bio_flags,
  1871. this_bio_flag);
  1872. nr++;
  1873. *bio_flags = this_bio_flag;
  1874. }
  1875. if (ret)
  1876. SetPageError(page);
  1877. cur = cur + iosize;
  1878. page_offset += iosize;
  1879. }
  1880. if (!nr) {
  1881. if (!PageError(page))
  1882. SetPageUptodate(page);
  1883. unlock_page(page);
  1884. }
  1885. return 0;
  1886. }
  1887. int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
  1888. get_extent_t *get_extent)
  1889. {
  1890. struct bio *bio = NULL;
  1891. unsigned long bio_flags = 0;
  1892. int ret;
  1893. ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
  1894. &bio_flags);
  1895. if (bio)
  1896. submit_one_bio(READ, bio, 0, bio_flags);
  1897. return ret;
  1898. }
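/*
* extent_read_full_page() is the single page form; batch readers such as
* extent_readpages() below pass the same 'bio' pointer into
* __extent_read_full_page() for every page so that adjacent pages coalesce
* into one bio, which is then submitted once at the end
*/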
  1899. /*
  1900. * the writepage semantics are similar to regular writepage. extent
  1901. * records are inserted to lock ranges in the tree, and as dirty areas
  1902. * are found, they are marked writeback. Then the lock bits are removed
  1903. * and the end_io handler clears the writeback ranges
  1904. */
  1905. static int __extent_writepage(struct page *page, struct writeback_control *wbc,
  1906. void *data)
  1907. {
  1908. struct inode *inode = page->mapping->host;
  1909. struct extent_page_data *epd = data;
  1910. struct extent_io_tree *tree = epd->tree;
  1911. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  1912. u64 delalloc_start;
  1913. u64 page_end = start + PAGE_CACHE_SIZE - 1;
  1914. u64 end;
  1915. u64 cur = start;
  1916. u64 extent_offset;
  1917. u64 last_byte = i_size_read(inode);
  1918. u64 block_start;
  1919. u64 iosize;
  1920. u64 unlock_start;
  1921. sector_t sector;
  1922. struct extent_map *em;
  1923. struct block_device *bdev;
  1924. int ret;
  1925. int nr = 0;
  1926. size_t pg_offset = 0;
  1927. size_t blocksize;
  1928. loff_t i_size = i_size_read(inode);
  1929. unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
  1930. u64 nr_delalloc;
  1931. u64 delalloc_end;
  1932. int page_started;
  1933. int compressed;
  1934. unsigned long nr_written = 0;
  1935. WARN_ON(!PageLocked(page));
  1936. pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
  1937. if (page->index > end_index ||
  1938. (page->index == end_index && !pg_offset)) {
  1939. page->mapping->a_ops->invalidatepage(page, 0);
  1940. unlock_page(page);
  1941. return 0;
  1942. }
  1943. if (page->index == end_index) {
  1944. char *userpage;
  1945. userpage = kmap_atomic(page, KM_USER0);
  1946. memset(userpage + pg_offset, 0,
  1947. PAGE_CACHE_SIZE - pg_offset);
  1948. kunmap_atomic(userpage, KM_USER0);
  1949. flush_dcache_page(page);
  1950. }
  1951. pg_offset = 0;
  1952. set_page_extent_mapped(page);
  1953. delalloc_start = start;
  1954. delalloc_end = 0;
  1955. page_started = 0;
  1956. if (!epd->extent_locked) {
  1957. while (delalloc_end < page_end) {
  1958. nr_delalloc = find_lock_delalloc_range(inode, tree,
  1959. page,
  1960. &delalloc_start,
  1961. &delalloc_end,
  1962. 128 * 1024 * 1024);
  1963. if (nr_delalloc == 0) {
  1964. delalloc_start = delalloc_end + 1;
  1965. continue;
  1966. }
  1967. tree->ops->fill_delalloc(inode, page, delalloc_start,
  1968. delalloc_end, &page_started,
  1969. &nr_written);
  1970. delalloc_start = delalloc_end + 1;
  1971. }
  1972. /* did the fill delalloc function already unlock and start
  1973. * the IO?
  1974. */
  1975. if (page_started) {
  1976. ret = 0;
  1977. goto update_nr_written;
  1978. }
  1979. }
  1980. lock_extent(tree, start, page_end, GFP_NOFS);
  1981. unlock_start = start;
  1982. if (tree->ops && tree->ops->writepage_start_hook) {
  1983. ret = tree->ops->writepage_start_hook(page, start,
  1984. page_end);
  1985. if (ret == -EAGAIN) {
  1986. unlock_extent(tree, start, page_end, GFP_NOFS);
  1987. redirty_page_for_writepage(wbc, page);
  1988. unlock_page(page);
  1989. ret = 0;
  1990. goto update_nr_written;
  1991. }
  1992. }
  1993. nr_written++;
  1994. end = page_end;
  1995. if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
  1996. printk(KERN_ERR "btrfs delalloc bits after lock_extent\n");
  1997. if (last_byte <= start) {
  1998. clear_extent_dirty(tree, start, page_end, GFP_NOFS);
  1999. unlock_extent(tree, start, page_end, GFP_NOFS);
  2000. if (tree->ops && tree->ops->writepage_end_io_hook)
  2001. tree->ops->writepage_end_io_hook(page, start,
  2002. page_end, NULL, 1);
  2003. unlock_start = page_end + 1;
  2004. goto done;
  2005. }
  2006. set_extent_uptodate(tree, start, page_end, GFP_NOFS);
  2007. blocksize = inode->i_sb->s_blocksize;
  2008. while (cur <= end) {
  2009. if (cur >= last_byte) {
  2010. clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
  2011. unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
  2012. if (tree->ops && tree->ops->writepage_end_io_hook)
  2013. tree->ops->writepage_end_io_hook(page, cur,
  2014. page_end, NULL, 1);
  2015. unlock_start = page_end + 1;
  2016. break;
  2017. }
  2018. em = epd->get_extent(inode, page, pg_offset, cur,
  2019. end - cur + 1, 1);
  2020. if (IS_ERR(em) || !em) {
  2021. SetPageError(page);
  2022. break;
  2023. }
  2024. extent_offset = cur - em->start;
  2025. BUG_ON(extent_map_end(em) <= cur);
  2026. BUG_ON(end < cur);
  2027. iosize = min(extent_map_end(em) - cur, end - cur + 1);
  2028. iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
  2029. sector = (em->block_start + extent_offset) >> 9;
  2030. bdev = em->bdev;
  2031. block_start = em->block_start;
  2032. compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
  2033. free_extent_map(em);
  2034. em = NULL;
  2035. /*
  2036. * compressed and inline extents are written through other
  2037. * paths in the FS
  2038. */
  2039. if (compressed || block_start == EXTENT_MAP_HOLE ||
  2040. block_start == EXTENT_MAP_INLINE) {
  2041. clear_extent_dirty(tree, cur,
  2042. cur + iosize - 1, GFP_NOFS);
  2043. unlock_extent(tree, unlock_start, cur + iosize - 1,
  2044. GFP_NOFS);
  2045. /*
  2046. * end_io notification does not happen here for
  2047. * compressed extents
  2048. */
  2049. if (!compressed && tree->ops &&
  2050. tree->ops->writepage_end_io_hook)
  2051. tree->ops->writepage_end_io_hook(page, cur,
  2052. cur + iosize - 1,
  2053. NULL, 1);
  2054. else if (compressed) {
  2055. /* we don't want to end_page_writeback on
  2056. * a compressed extent. this happens
  2057. * elsewhere
  2058. */
  2059. nr++;
  2060. }
  2061. cur += iosize;
  2062. pg_offset += iosize;
  2063. unlock_start = cur;
  2064. continue;
  2065. }
  2066. /* leave this out until we have a page_mkwrite call */
  2067. if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
  2068. EXTENT_DIRTY, 0)) {
  2069. cur = cur + iosize;
  2070. pg_offset += iosize;
  2071. continue;
  2072. }
  2073. clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
  2074. if (tree->ops && tree->ops->writepage_io_hook) {
  2075. ret = tree->ops->writepage_io_hook(page, cur,
  2076. cur + iosize - 1);
  2077. } else {
  2078. ret = 0;
  2079. }
  2080. if (ret) {
  2081. SetPageError(page);
  2082. } else {
  2083. unsigned long max_nr = end_index + 1;
  2084. set_range_writeback(tree, cur, cur + iosize - 1);
  2085. if (!PageWriteback(page)) {
  2086. printk(KERN_ERR "btrfs warning page %lu not "
  2087. "writeback, cur %llu end %llu\n",
  2088. page->index, (unsigned long long)cur,
  2089. (unsigned long long)end);
  2090. }
  2091. ret = submit_extent_page(WRITE, tree, page, sector,
  2092. iosize, pg_offset, bdev,
  2093. &epd->bio, max_nr,
  2094. end_bio_extent_writepage,
  2095. 0, 0, 0);
  2096. if (ret)
  2097. SetPageError(page);
  2098. }
  2099. cur = cur + iosize;
  2100. pg_offset += iosize;
  2101. nr++;
  2102. }
  2103. done:
  2104. if (nr == 0) {
  2105. /* make sure the mapping tag for page dirty gets cleared */
  2106. set_page_writeback(page);
  2107. end_page_writeback(page);
  2108. }
  2109. if (unlock_start <= page_end)
  2110. unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
  2111. unlock_page(page);
  2112. update_nr_written:
  2113. wbc->nr_to_write -= nr_written;
  2114. if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
  2115. wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
  2116. page->mapping->writeback_index = page->index + nr_written;
  2117. return 0;
  2118. }
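/*
* note on the accounting above: nr_written counts pages whose writeout was
* started by this call (including pages the fill_delalloc hook handled); it
* is subtracted from wbc->nr_to_write, and for cyclic or whole file
* writeback it advances mapping->writeback_index so the next pass resumes
* after the pages written here
*/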
  2119. /**
2120. * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
  2121. * @mapping: address space structure to write
  2122. * @wbc: subtract the number of written pages from *@wbc->nr_to_write
  2123. * @writepage: function called for each page
  2124. * @data: data passed to writepage function
  2125. *
  2126. * If a page is already under I/O, write_cache_pages() skips it, even
  2127. * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
  2128. * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
  2129. * and msync() need to guarantee that all the data which was dirty at the time
  2130. * the call was made get new I/O started against them. If wbc->sync_mode is
  2131. * WB_SYNC_ALL then we were called for data integrity and we must wait for
  2132. * existing IO to complete.
  2133. */
  2134. static int extent_write_cache_pages(struct extent_io_tree *tree,
  2135. struct address_space *mapping,
  2136. struct writeback_control *wbc,
  2137. writepage_t writepage, void *data,
  2138. void (*flush_fn)(void *))
  2139. {
  2140. struct backing_dev_info *bdi = mapping->backing_dev_info;
  2141. int ret = 0;
  2142. int done = 0;
  2143. struct pagevec pvec;
  2144. int nr_pages;
  2145. pgoff_t index;
  2146. pgoff_t end; /* Inclusive */
  2147. int scanned = 0;
  2148. int range_whole = 0;
  2149. if (wbc->nonblocking && bdi_write_congested(bdi)) {
  2150. wbc->encountered_congestion = 1;
  2151. return 0;
  2152. }
  2153. pagevec_init(&pvec, 0);
  2154. if (wbc->range_cyclic) {
  2155. index = mapping->writeback_index; /* Start from prev offset */
  2156. end = -1;
  2157. } else {
  2158. index = wbc->range_start >> PAGE_CACHE_SHIFT;
  2159. end = wbc->range_end >> PAGE_CACHE_SHIFT;
  2160. if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
  2161. range_whole = 1;
  2162. scanned = 1;
  2163. }
  2164. retry:
  2165. while (!done && (index <= end) &&
  2166. (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
  2167. PAGECACHE_TAG_DIRTY, min(end - index,
  2168. (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
  2169. unsigned i;
  2170. scanned = 1;
  2171. for (i = 0; i < nr_pages; i++) {
  2172. struct page *page = pvec.pages[i];
  2173. /*
  2174. * At this point we hold neither mapping->tree_lock nor
  2175. * lock on the page itself: the page may be truncated or
  2176. * invalidated (changing page->mapping to NULL), or even
  2177. * swizzled back from swapper_space to tmpfs file
  2178. * mapping
  2179. */
  2180. if (tree->ops && tree->ops->write_cache_pages_lock_hook)
  2181. tree->ops->write_cache_pages_lock_hook(page);
  2182. else
  2183. lock_page(page);
  2184. if (unlikely(page->mapping != mapping)) {
  2185. unlock_page(page);
  2186. continue;
  2187. }
  2188. if (!wbc->range_cyclic && page->index > end) {
  2189. done = 1;
  2190. unlock_page(page);
  2191. continue;
  2192. }
  2193. if (wbc->sync_mode != WB_SYNC_NONE) {
  2194. if (PageWriteback(page))
  2195. flush_fn(data);
  2196. wait_on_page_writeback(page);
  2197. }
  2198. if (PageWriteback(page) ||
  2199. !clear_page_dirty_for_io(page)) {
  2200. unlock_page(page);
  2201. continue;
  2202. }
  2203. ret = (*writepage)(page, wbc, data);
  2204. if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
  2205. unlock_page(page);
  2206. ret = 0;
  2207. }
  2208. if (ret || wbc->nr_to_write <= 0)
  2209. done = 1;
  2210. if (wbc->nonblocking && bdi_write_congested(bdi)) {
  2211. wbc->encountered_congestion = 1;
  2212. done = 1;
  2213. }
  2214. }
  2215. pagevec_release(&pvec);
  2216. cond_resched();
  2217. }
  2218. if (!scanned && !done) {
  2219. /*
  2220. * We hit the last page and there is more work to be done: wrap
  2221. * back to the start of the file
  2222. */
  2223. scanned = 1;
  2224. index = 0;
  2225. goto retry;
  2226. }
  2227. return ret;
  2228. }
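/*
* when the scan is range_cyclic it starts at mapping->writeback_index; if
* no dirty pages were found from there to the end of the file and we are
* not done, the retry label above wraps index back to 0 so the beginning
* of the file gets one pass as well
*/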
  2229. static noinline void flush_write_bio(void *data)
  2230. {
  2231. struct extent_page_data *epd = data;
  2232. if (epd->bio) {
  2233. submit_one_bio(WRITE, epd->bio, 0, 0);
  2234. epd->bio = NULL;
  2235. }
  2236. }
  2237. int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
  2238. get_extent_t *get_extent,
  2239. struct writeback_control *wbc)
  2240. {
  2241. int ret;
  2242. struct address_space *mapping = page->mapping;
  2243. struct extent_page_data epd = {
  2244. .bio = NULL,
  2245. .tree = tree,
  2246. .get_extent = get_extent,
  2247. .extent_locked = 0,
  2248. };
  2249. struct writeback_control wbc_writepages = {
  2250. .bdi = wbc->bdi,
  2251. .sync_mode = WB_SYNC_NONE,
  2252. .older_than_this = NULL,
  2253. .nr_to_write = 64,
  2254. .range_start = page_offset(page) + PAGE_CACHE_SIZE,
  2255. .range_end = (loff_t)-1,
  2256. };
  2257. ret = __extent_writepage(page, wbc, &epd);
  2258. extent_write_cache_pages(tree, mapping, &wbc_writepages,
  2259. __extent_writepage, &epd, flush_write_bio);
  2260. if (epd.bio)
  2261. submit_one_bio(WRITE, epd.bio, 0, 0);
  2262. return ret;
  2263. }
  2264. int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
  2265. u64 start, u64 end, get_extent_t *get_extent,
  2266. int mode)
  2267. {
  2268. int ret = 0;
  2269. struct address_space *mapping = inode->i_mapping;
  2270. struct page *page;
  2271. unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
  2272. PAGE_CACHE_SHIFT;
  2273. struct extent_page_data epd = {
  2274. .bio = NULL,
  2275. .tree = tree,
  2276. .get_extent = get_extent,
  2277. .extent_locked = 1,
  2278. };
  2279. struct writeback_control wbc_writepages = {
  2280. .bdi = inode->i_mapping->backing_dev_info,
  2281. .sync_mode = mode,
  2282. .older_than_this = NULL,
  2283. .nr_to_write = nr_pages * 2,
  2284. .range_start = start,
  2285. .range_end = end + 1,
  2286. };
  2287. while (start <= end) {
  2288. page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
  2289. if (clear_page_dirty_for_io(page))
  2290. ret = __extent_writepage(page, &wbc_writepages, &epd);
  2291. else {
  2292. if (tree->ops && tree->ops->writepage_end_io_hook)
  2293. tree->ops->writepage_end_io_hook(page, start,
  2294. start + PAGE_CACHE_SIZE - 1,
  2295. NULL, 1);
  2296. unlock_page(page);
  2297. }
  2298. page_cache_release(page);
  2299. start += PAGE_CACHE_SIZE;
  2300. }
  2301. if (epd.bio)
  2302. submit_one_bio(WRITE, epd.bio, 0, 0);
  2303. return ret;
  2304. }
  2305. int extent_writepages(struct extent_io_tree *tree,
  2306. struct address_space *mapping,
  2307. get_extent_t *get_extent,
  2308. struct writeback_control *wbc)
  2309. {
  2310. int ret = 0;
  2311. struct extent_page_data epd = {
  2312. .bio = NULL,
  2313. .tree = tree,
  2314. .get_extent = get_extent,
  2315. .extent_locked = 0,
  2316. };
  2317. ret = extent_write_cache_pages(tree, mapping, wbc,
  2318. __extent_writepage, &epd,
  2319. flush_write_bio);
  2320. if (epd.bio)
  2321. submit_one_bio(WRITE, epd.bio, 0, 0);
  2322. return ret;
  2323. }
  2324. int extent_readpages(struct extent_io_tree *tree,
  2325. struct address_space *mapping,
  2326. struct list_head *pages, unsigned nr_pages,
  2327. get_extent_t get_extent)
  2328. {
  2329. struct bio *bio = NULL;
  2330. unsigned page_idx;
  2331. struct pagevec pvec;
  2332. unsigned long bio_flags = 0;
  2333. pagevec_init(&pvec, 0);
  2334. for (page_idx = 0; page_idx < nr_pages; page_idx++) {
  2335. struct page *page = list_entry(pages->prev, struct page, lru);
  2336. prefetchw(&page->flags);
  2337. list_del(&page->lru);
  2338. /*
  2339. * what we want to do here is call add_to_page_cache_lru,
  2340. * but that isn't exported, so we reproduce it here
  2341. */
  2342. if (!add_to_page_cache(page, mapping,
  2343. page->index, GFP_KERNEL)) {
  2344. /* open coding of lru_cache_add, also not exported */
  2345. page_cache_get(page);
  2346. if (!pagevec_add(&pvec, page))
  2347. __pagevec_lru_add_file(&pvec);
  2348. __extent_read_full_page(tree, page, get_extent,
  2349. &bio, 0, &bio_flags);
  2350. }
  2351. page_cache_release(page);
  2352. }
  2353. if (pagevec_count(&pvec))
  2354. __pagevec_lru_add_file(&pvec);
  2355. BUG_ON(!list_empty(pages));
  2356. if (bio)
  2357. submit_one_bio(READ, bio, 0, bio_flags);
  2358. return 0;
  2359. }
  2360. /*
  2361. * basic invalidatepage code, this waits on any locked or writeback
  2362. * ranges corresponding to the page, and then deletes any extent state
  2363. * records from the tree
  2364. */
  2365. int extent_invalidatepage(struct extent_io_tree *tree,
  2366. struct page *page, unsigned long offset)
  2367. {
  2368. u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
  2369. u64 end = start + PAGE_CACHE_SIZE - 1;
  2370. size_t blocksize = page->mapping->host->i_sb->s_blocksize;
  2371. start += (offset + blocksize - 1) & ~(blocksize - 1);
  2372. if (start > end)
  2373. return 0;
  2374. lock_extent(tree, start, end, GFP_NOFS);
  2375. wait_on_extent_writeback(tree, start, end);
  2376. clear_extent_bit(tree, start, end,
  2377. EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
  2378. 1, 1, GFP_NOFS);
  2379. return 0;
  2380. }
  2381. /*
2382. * simple commit_write call; the page is marked dirty with set_page_dirty
2383. * and the inode size is updated if the write extended past i_size
  2384. */
  2385. int extent_commit_write(struct extent_io_tree *tree,
  2386. struct inode *inode, struct page *page,
  2387. unsigned from, unsigned to)
  2388. {
  2389. loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
  2390. set_page_extent_mapped(page);
  2391. set_page_dirty(page);
  2392. if (pos > inode->i_size) {
  2393. i_size_write(inode, pos);
  2394. mark_inode_dirty(inode);
  2395. }
  2396. return 0;
  2397. }
  2398. int extent_prepare_write(struct extent_io_tree *tree,
  2399. struct inode *inode, struct page *page,
  2400. unsigned from, unsigned to, get_extent_t *get_extent)
  2401. {
  2402. u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
  2403. u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
  2404. u64 block_start;
  2405. u64 orig_block_start;
  2406. u64 block_end;
  2407. u64 cur_end;
  2408. struct extent_map *em;
  2409. unsigned blocksize = 1 << inode->i_blkbits;
  2410. size_t page_offset = 0;
  2411. size_t block_off_start;
  2412. size_t block_off_end;
  2413. int err = 0;
  2414. int iocount = 0;
  2415. int ret = 0;
  2416. int isnew;
  2417. set_page_extent_mapped(page);
  2418. block_start = (page_start + from) & ~((u64)blocksize - 1);
  2419. block_end = (page_start + to - 1) | (blocksize - 1);
  2420. orig_block_start = block_start;
  2421. lock_extent(tree, page_start, page_end, GFP_NOFS);
  2422. while (block_start <= block_end) {
  2423. em = get_extent(inode, page, page_offset, block_start,
  2424. block_end - block_start + 1, 1);
  2425. if (IS_ERR(em) || !em)
  2426. goto err;
  2427. cur_end = min(block_end, extent_map_end(em) - 1);
  2428. block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
  2429. block_off_end = block_off_start + blocksize;
  2430. isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
  2431. if (!PageUptodate(page) && isnew &&
  2432. (block_off_end > to || block_off_start < from)) {
  2433. void *kaddr;
  2434. kaddr = kmap_atomic(page, KM_USER0);
  2435. if (block_off_end > to)
  2436. memset(kaddr + to, 0, block_off_end - to);
  2437. if (block_off_start < from)
  2438. memset(kaddr + block_off_start, 0,
  2439. from - block_off_start);
  2440. flush_dcache_page(page);
  2441. kunmap_atomic(kaddr, KM_USER0);
  2442. }
  2443. if ((em->block_start != EXTENT_MAP_HOLE &&
  2444. em->block_start != EXTENT_MAP_INLINE) &&
  2445. !isnew && !PageUptodate(page) &&
  2446. (block_off_end > to || block_off_start < from) &&
  2447. !test_range_bit(tree, block_start, cur_end,
  2448. EXTENT_UPTODATE, 1)) {
  2449. u64 sector;
  2450. u64 extent_offset = block_start - em->start;
  2451. size_t iosize;
  2452. sector = (em->block_start + extent_offset) >> 9;
  2453. iosize = (cur_end - block_start + blocksize) &
  2454. ~((u64)blocksize - 1);
  2455. /*
  2456. * we've already got the extent locked, but we
  2457. * need to split the state such that our end_bio
  2458. * handler can clear the lock.
  2459. */
  2460. set_extent_bit(tree, block_start,
  2461. block_start + iosize - 1,
  2462. EXTENT_LOCKED, 0, NULL, GFP_NOFS);
  2463. ret = submit_extent_page(READ, tree, page,
  2464. sector, iosize, page_offset, em->bdev,
  2465. NULL, 1,
  2466. end_bio_extent_preparewrite, 0,
  2467. 0, 0);
  2468. iocount++;
  2469. block_start = block_start + iosize;
  2470. } else {
  2471. set_extent_uptodate(tree, block_start, cur_end,
  2472. GFP_NOFS);
  2473. unlock_extent(tree, block_start, cur_end, GFP_NOFS);
  2474. block_start = cur_end + 1;
  2475. }
  2476. page_offset = block_start & (PAGE_CACHE_SIZE - 1);
  2477. free_extent_map(em);
  2478. }
  2479. if (iocount) {
  2480. wait_extent_bit(tree, orig_block_start,
  2481. block_end, EXTENT_LOCKED);
  2482. }
  2483. check_page_uptodate(tree, page);
  2484. err:
  2485. /* FIXME, zero out newly allocated blocks on error */
  2486. return err;
  2487. }
  2488. /*
  2489. * a helper for releasepage, this tests for areas of the page that
  2490. * are locked or under IO and drops the related state bits if it is safe
  2491. * to drop the page.
  2492. */
  2493. int try_release_extent_state(struct extent_map_tree *map,
  2494. struct extent_io_tree *tree, struct page *page,
  2495. gfp_t mask)
  2496. {
  2497. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  2498. u64 end = start + PAGE_CACHE_SIZE - 1;
  2499. int ret = 1;
  2500. if (test_range_bit(tree, start, end,
  2501. EXTENT_IOBITS | EXTENT_ORDERED, 0))
  2502. ret = 0;
  2503. else {
  2504. if ((mask & GFP_NOFS) == GFP_NOFS)
  2505. mask = GFP_NOFS;
  2506. clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
  2507. 1, 1, mask);
  2508. }
  2509. return ret;
  2510. }
  2511. /*
  2512. * a helper for releasepage. As long as there are no locked extents
  2513. * in the range corresponding to the page, both state records and extent
  2514. * map records are removed
  2515. */
  2516. int try_release_extent_mapping(struct extent_map_tree *map,
  2517. struct extent_io_tree *tree, struct page *page,
  2518. gfp_t mask)
  2519. {
  2520. struct extent_map *em;
  2521. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  2522. u64 end = start + PAGE_CACHE_SIZE - 1;
  2523. if ((mask & __GFP_WAIT) &&
  2524. page->mapping->host->i_size > 16 * 1024 * 1024) {
  2525. u64 len;
  2526. while (start <= end) {
  2527. len = end - start + 1;
  2528. spin_lock(&map->lock);
  2529. em = lookup_extent_mapping(map, start, len);
  2530. if (!em || IS_ERR(em)) {
  2531. spin_unlock(&map->lock);
  2532. break;
  2533. }
  2534. if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
  2535. em->start != start) {
  2536. spin_unlock(&map->lock);
  2537. free_extent_map(em);
  2538. break;
  2539. }
  2540. if (!test_range_bit(tree, em->start,
  2541. extent_map_end(em) - 1,
  2542. EXTENT_LOCKED | EXTENT_WRITEBACK |
  2543. EXTENT_ORDERED,
  2544. 0)) {
  2545. remove_extent_mapping(map, em);
  2546. /* once for the rb tree */
  2547. free_extent_map(em);
  2548. }
  2549. start = extent_map_end(em);
  2550. spin_unlock(&map->lock);
  2551. /* once for us */
  2552. free_extent_map(em);
  2553. }
  2554. }
  2555. return try_release_extent_state(map, tree, page, mask);
  2556. }
  2557. sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
  2558. get_extent_t *get_extent)
  2559. {
  2560. struct inode *inode = mapping->host;
  2561. u64 start = iblock << inode->i_blkbits;
  2562. sector_t sector = 0;
  2563. size_t blksize = (1 << inode->i_blkbits);
  2564. struct extent_map *em;
  2565. lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
  2566. GFP_NOFS);
  2567. em = get_extent(inode, NULL, 0, start, blksize, 0);
  2568. unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
  2569. GFP_NOFS);
  2570. if (!em || IS_ERR(em))
  2571. return 0;
  2572. if (em->block_start > EXTENT_MAP_LAST_BYTE)
  2573. goto out;
  2574. sector = (em->block_start + start - em->start) >> inode->i_blkbits;
  2575. out:
  2576. free_extent_map(em);
  2577. return sector;
  2578. }
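/*
* extent_bmap() serves as the extent tree backed ->bmap address_space
* operation: it looks up the extent covering the logical block 'iblock'
* and converts it to a device block with
* (em->block_start + start - em->start) >> i_blkbits, returning 0 for
* holes, inline or delalloc extents (block_start past EXTENT_MAP_LAST_BYTE)
*/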
  2579. static inline struct page *extent_buffer_page(struct extent_buffer *eb,
  2580. unsigned long i)
  2581. {
  2582. struct page *p;
  2583. struct address_space *mapping;
  2584. if (i == 0)
  2585. return eb->first_page;
  2586. i += eb->start >> PAGE_CACHE_SHIFT;
  2587. mapping = eb->first_page->mapping;
  2588. if (!mapping)
  2589. return NULL;
  2590. /*
  2591. * extent_buffer_page is only called after pinning the page
  2592. * by increasing the reference count. So we know the page must
  2593. * be in the radix tree.
  2594. */
  2595. rcu_read_lock();
  2596. p = radix_tree_lookup(&mapping->page_tree, i);
  2597. rcu_read_unlock();
  2598. return p;
  2599. }
  2600. static inline unsigned long num_extent_pages(u64 start, u64 len)
  2601. {
  2602. return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
  2603. (start >> PAGE_CACHE_SHIFT);
  2604. }
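/*
* worked example with 4K pages: a buffer at start 6144 with len 8192 covers
* bytes 6144..14335, so num_extent_pages() returns
* ((6144 + 8192 + 4095) >> 12) - (6144 >> 12) = 4 - 1 = 3 pages
*/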
  2605. static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
  2606. u64 start,
  2607. unsigned long len,
  2608. gfp_t mask)
  2609. {
  2610. struct extent_buffer *eb = NULL;
  2611. #ifdef LEAK_DEBUG
  2612. unsigned long flags;
  2613. #endif
  2614. eb = kmem_cache_zalloc(extent_buffer_cache, mask);
  2615. eb->start = start;
  2616. eb->len = len;
  2617. mutex_init(&eb->mutex);
  2618. #ifdef LEAK_DEBUG
  2619. spin_lock_irqsave(&leak_lock, flags);
  2620. list_add(&eb->leak_list, &buffers);
  2621. spin_unlock_irqrestore(&leak_lock, flags);
  2622. #endif
  2623. atomic_set(&eb->refs, 1);
  2624. return eb;
  2625. }
  2626. static void __free_extent_buffer(struct extent_buffer *eb)
  2627. {
  2628. #ifdef LEAK_DEBUG
  2629. unsigned long flags;
  2630. spin_lock_irqsave(&leak_lock, flags);
  2631. list_del(&eb->leak_list);
  2632. spin_unlock_irqrestore(&leak_lock, flags);
  2633. #endif
  2634. kmem_cache_free(extent_buffer_cache, eb);
  2635. }
  2636. struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
  2637. u64 start, unsigned long len,
  2638. struct page *page0,
  2639. gfp_t mask)
  2640. {
  2641. unsigned long num_pages = num_extent_pages(start, len);
  2642. unsigned long i;
  2643. unsigned long index = start >> PAGE_CACHE_SHIFT;
  2644. struct extent_buffer *eb;
  2645. struct extent_buffer *exists = NULL;
  2646. struct page *p;
  2647. struct address_space *mapping = tree->mapping;
  2648. int uptodate = 1;
  2649. spin_lock(&tree->buffer_lock);
  2650. eb = buffer_search(tree, start);
  2651. if (eb) {
  2652. atomic_inc(&eb->refs);
  2653. spin_unlock(&tree->buffer_lock);
  2654. mark_page_accessed(eb->first_page);
  2655. return eb;
  2656. }
  2657. spin_unlock(&tree->buffer_lock);
  2658. eb = __alloc_extent_buffer(tree, start, len, mask);
  2659. if (!eb)
  2660. return NULL;
  2661. if (page0) {
  2662. eb->first_page = page0;
  2663. i = 1;
  2664. index++;
  2665. page_cache_get(page0);
  2666. mark_page_accessed(page0);
  2667. set_page_extent_mapped(page0);
  2668. set_page_extent_head(page0, len);
  2669. uptodate = PageUptodate(page0);
  2670. } else {
  2671. i = 0;
  2672. }
  2673. for (; i < num_pages; i++, index++) {
  2674. p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
  2675. if (!p) {
  2676. WARN_ON(1);
  2677. goto free_eb;
  2678. }
  2679. set_page_extent_mapped(p);
  2680. mark_page_accessed(p);
  2681. if (i == 0) {
  2682. eb->first_page = p;
  2683. set_page_extent_head(p, len);
  2684. } else {
  2685. set_page_private(p, EXTENT_PAGE_PRIVATE);
  2686. }
  2687. if (!PageUptodate(p))
  2688. uptodate = 0;
  2689. unlock_page(p);
  2690. }
  2691. if (uptodate)
  2692. eb->flags |= EXTENT_UPTODATE;
  2693. eb->flags |= EXTENT_BUFFER_FILLED;
  2694. spin_lock(&tree->buffer_lock);
  2695. exists = buffer_tree_insert(tree, start, &eb->rb_node);
  2696. if (exists) {
  2697. /* add one reference for the caller */
  2698. atomic_inc(&exists->refs);
  2699. spin_unlock(&tree->buffer_lock);
  2700. goto free_eb;
  2701. }
  2702. spin_unlock(&tree->buffer_lock);
  2703. /* add one reference for the tree */
  2704. atomic_inc(&eb->refs);
  2705. return eb;
  2706. free_eb:
  2707. if (!atomic_dec_and_test(&eb->refs))
  2708. return exists;
  2709. for (index = 1; index < i; index++)
  2710. page_cache_release(extent_buffer_page(eb, index));
  2711. page_cache_release(extent_buffer_page(eb, 0));
  2712. __free_extent_buffer(eb);
  2713. return exists;
  2714. }
  2715. struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
  2716. u64 start, unsigned long len,
  2717. gfp_t mask)
  2718. {
  2719. struct extent_buffer *eb;
  2720. spin_lock(&tree->buffer_lock);
  2721. eb = buffer_search(tree, start);
  2722. if (eb)
  2723. atomic_inc(&eb->refs);
  2724. spin_unlock(&tree->buffer_lock);
  2725. if (eb)
  2726. mark_page_accessed(eb->first_page);
  2727. return eb;
  2728. }
  2729. void free_extent_buffer(struct extent_buffer *eb)
  2730. {
  2731. if (!eb)
  2732. return;
  2733. if (!atomic_dec_and_test(&eb->refs))
  2734. return;
  2735. WARN_ON(1);
  2736. }
  2737. int clear_extent_buffer_dirty(struct extent_io_tree *tree,
  2738. struct extent_buffer *eb)
  2739. {
  2740. int set;
  2741. unsigned long i;
  2742. unsigned long num_pages;
  2743. struct page *page;
  2744. u64 start = eb->start;
  2745. u64 end = start + eb->len - 1;
  2746. set = clear_extent_dirty(tree, start, end, GFP_NOFS);
  2747. num_pages = num_extent_pages(eb->start, eb->len);
  2748. for (i = 0; i < num_pages; i++) {
  2749. page = extent_buffer_page(eb, i);
  2750. if (!set && !PageDirty(page))
  2751. continue;
  2752. lock_page(page);
  2753. if (i == 0)
  2754. set_page_extent_head(page, eb->len);
  2755. else
  2756. set_page_private(page, EXTENT_PAGE_PRIVATE);
  2757. /*
  2758. * if we're on the last page or the first page and the
  2759. * block isn't aligned on a page boundary, do extra checks
2760. * to make sure we don't clean a page that is partially dirty
  2761. */
  2762. if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
  2763. ((i == num_pages - 1) &&
  2764. ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
  2765. start = (u64)page->index << PAGE_CACHE_SHIFT;
  2766. end = start + PAGE_CACHE_SIZE - 1;
  2767. if (test_range_bit(tree, start, end,
  2768. EXTENT_DIRTY, 0)) {
  2769. unlock_page(page);
  2770. continue;
  2771. }
  2772. }
  2773. clear_page_dirty_for_io(page);
  2774. spin_lock_irq(&page->mapping->tree_lock);
  2775. if (!PageDirty(page)) {
  2776. radix_tree_tag_clear(&page->mapping->page_tree,
  2777. page_index(page),
  2778. PAGECACHE_TAG_DIRTY);
  2779. }
  2780. spin_unlock_irq(&page->mapping->tree_lock);
  2781. unlock_page(page);
  2782. }
  2783. return 0;
  2784. }
  2785. int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
  2786. struct extent_buffer *eb)
  2787. {
  2788. return wait_on_extent_writeback(tree, eb->start,
  2789. eb->start + eb->len - 1);
  2790. }
  2791. int set_extent_buffer_dirty(struct extent_io_tree *tree,
  2792. struct extent_buffer *eb)
  2793. {
  2794. unsigned long i;
  2795. unsigned long num_pages;
  2796. num_pages = num_extent_pages(eb->start, eb->len);
  2797. for (i = 0; i < num_pages; i++) {
  2798. struct page *page = extent_buffer_page(eb, i);
  2799. /* writepage may need to do something special for the
2800. * first page, so we have to make sure page->private is
  2801. * properly set. releasepage may drop page->private
  2802. * on us if the page isn't already dirty.
  2803. */
  2804. lock_page(page);
  2805. if (i == 0) {
  2806. set_page_extent_head(page, eb->len);
  2807. } else if (PagePrivate(page) &&
  2808. page->private != EXTENT_PAGE_PRIVATE) {
  2809. set_page_extent_mapped(page);
  2810. }
  2811. __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
  2812. set_extent_dirty(tree, page_offset(page),
  2813. page_offset(page) + PAGE_CACHE_SIZE - 1,
  2814. GFP_NOFS);
  2815. unlock_page(page);
  2816. }
  2817. return 0;
  2818. }
  2819. int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
  2820. struct extent_buffer *eb)
  2821. {
  2822. unsigned long i;
  2823. struct page *page;
  2824. unsigned long num_pages;
  2825. num_pages = num_extent_pages(eb->start, eb->len);
  2826. eb->flags &= ~EXTENT_UPTODATE;
  2827. clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
  2828. GFP_NOFS);
  2829. for (i = 0; i < num_pages; i++) {
  2830. page = extent_buffer_page(eb, i);
  2831. if (page)
  2832. ClearPageUptodate(page);
  2833. }
  2834. return 0;
  2835. }
  2836. int set_extent_buffer_uptodate(struct extent_io_tree *tree,
  2837. struct extent_buffer *eb)
  2838. {
  2839. unsigned long i;
  2840. struct page *page;
  2841. unsigned long num_pages;
  2842. num_pages = num_extent_pages(eb->start, eb->len);
  2843. set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
  2844. GFP_NOFS);
  2845. for (i = 0; i < num_pages; i++) {
  2846. page = extent_buffer_page(eb, i);
  2847. if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
  2848. ((i == num_pages - 1) &&
  2849. ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
  2850. check_page_uptodate(tree, page);
  2851. continue;
  2852. }
  2853. SetPageUptodate(page);
  2854. }
  2855. return 0;
  2856. }
  2857. int extent_range_uptodate(struct extent_io_tree *tree,
  2858. u64 start, u64 end)
  2859. {
  2860. struct page *page;
  2861. int ret;
  2862. int pg_uptodate = 1;
  2863. int uptodate;
  2864. unsigned long index;
  2865. ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
  2866. if (ret)
  2867. return 1;
  2868. while (start <= end) {
  2869. index = start >> PAGE_CACHE_SHIFT;
  2870. page = find_get_page(tree->mapping, index);
  2871. uptodate = PageUptodate(page);
  2872. page_cache_release(page);
  2873. if (!uptodate) {
  2874. pg_uptodate = 0;
  2875. break;
  2876. }
  2877. start += PAGE_CACHE_SIZE;
  2878. }
  2879. return pg_uptodate;
  2880. }
  2881. int extent_buffer_uptodate(struct extent_io_tree *tree,
  2882. struct extent_buffer *eb)
  2883. {
  2884. int ret = 0;
  2885. unsigned long num_pages;
  2886. unsigned long i;
  2887. struct page *page;
  2888. int pg_uptodate = 1;
  2889. if (eb->flags & EXTENT_UPTODATE)
  2890. return 1;
  2891. ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
  2892. EXTENT_UPTODATE, 1);
  2893. if (ret)
  2894. return ret;
  2895. num_pages = num_extent_pages(eb->start, eb->len);
  2896. for (i = 0; i < num_pages; i++) {
  2897. page = extent_buffer_page(eb, i);
  2898. if (!PageUptodate(page)) {
  2899. pg_uptodate = 0;
  2900. break;
  2901. }
  2902. }
  2903. return pg_uptodate;
  2904. }
  2905. int read_extent_buffer_pages(struct extent_io_tree *tree,
  2906. struct extent_buffer *eb,
  2907. u64 start, int wait,
  2908. get_extent_t *get_extent, int mirror_num)
  2909. {
  2910. unsigned long i;
  2911. unsigned long start_i;
  2912. struct page *page;
  2913. int err;
  2914. int ret = 0;
  2915. int locked_pages = 0;
  2916. int all_uptodate = 1;
  2917. int inc_all_pages = 0;
  2918. unsigned long num_pages;
  2919. struct bio *bio = NULL;
  2920. unsigned long bio_flags = 0;
  2921. if (eb->flags & EXTENT_UPTODATE)
  2922. return 0;
  2923. if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
  2924. EXTENT_UPTODATE, 1)) {
  2925. return 0;
  2926. }
  2927. if (start) {
  2928. WARN_ON(start < eb->start);
  2929. start_i = (start >> PAGE_CACHE_SHIFT) -
  2930. (eb->start >> PAGE_CACHE_SHIFT);
  2931. } else {
  2932. start_i = 0;
  2933. }
  2934. num_pages = num_extent_pages(eb->start, eb->len);
  2935. for (i = start_i; i < num_pages; i++) {
  2936. page = extent_buffer_page(eb, i);
  2937. if (!wait) {
  2938. if (!trylock_page(page))
  2939. goto unlock_exit;
  2940. } else {
  2941. lock_page(page);
  2942. }
  2943. locked_pages++;
  2944. if (!PageUptodate(page))
  2945. all_uptodate = 0;
  2946. }
  2947. if (all_uptodate) {
  2948. if (start_i == 0)
  2949. eb->flags |= EXTENT_UPTODATE;
  2950. goto unlock_exit;
  2951. }
  2952. for (i = start_i; i < num_pages; i++) {
  2953. page = extent_buffer_page(eb, i);
  2954. if (inc_all_pages)
  2955. page_cache_get(page);
  2956. if (!PageUptodate(page)) {
  2957. if (start_i == 0)
  2958. inc_all_pages = 1;
  2959. ClearPageError(page);
  2960. err = __extent_read_full_page(tree, page,
  2961. get_extent, &bio,
  2962. mirror_num, &bio_flags);
  2963. if (err)
  2964. ret = err;
  2965. } else {
  2966. unlock_page(page);
  2967. }
  2968. }
  2969. if (bio)
  2970. submit_one_bio(READ, bio, mirror_num, bio_flags);
  2971. if (ret || !wait)
  2972. return ret;
  2973. for (i = start_i; i < num_pages; i++) {
  2974. page = extent_buffer_page(eb, i);
  2975. wait_on_page_locked(page);
  2976. if (!PageUptodate(page))
  2977. ret = -EIO;
  2978. }
  2979. if (!ret)
  2980. eb->flags |= EXTENT_UPTODATE;
  2981. return ret;
  2982. unlock_exit:
  2983. i = start_i;
  2984. while (locked_pages > 0) {
  2985. page = extent_buffer_page(eb, i);
  2986. i++;
  2987. unlock_page(page);
  2988. locked_pages--;
  2989. }
  2990. return ret;
  2991. }
  2992. void read_extent_buffer(struct extent_buffer *eb, void *dstv,
  2993. unsigned long start,
  2994. unsigned long len)
  2995. {
  2996. size_t cur;
  2997. size_t offset;
  2998. struct page *page;
  2999. char *kaddr;
  3000. char *dst = (char *)dstv;
  3001. size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
  3002. unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
  3003. WARN_ON(start > eb->len);
  3004. WARN_ON(start + len > eb->start + eb->len);
  3005. offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
  3006. while (len > 0) {
  3007. page = extent_buffer_page(eb, i);
  3008. cur = min(len, (PAGE_CACHE_SIZE - offset));
  3009. kaddr = kmap_atomic(page, KM_USER1);
  3010. memcpy(dst, kaddr + offset, cur);
  3011. kunmap_atomic(kaddr, KM_USER1);
  3012. dst += cur;
  3013. len -= cur;
  3014. offset = 0;
  3015. i++;
  3016. }
  3017. }
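/*
* read_extent_buffer() copies len bytes at logical offset 'start' inside
* the buffer into dstv, mapping one page at a time with kmap_atomic; for
* example a 32 byte read at offset 4080 of a page aligned eb takes the last
* 16 bytes of page 0 and the first 16 bytes of page 1
*/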
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
                              unsigned long min_len, char **token, char **map,
                              unsigned long *map_start,
                              unsigned long *map_len, int km)
{
        size_t offset = start & (PAGE_CACHE_SIZE - 1);
        char *kaddr;
        struct page *p;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
        unsigned long end_i = (start_offset + start + min_len - 1) >>
                PAGE_CACHE_SHIFT;

        if (i != end_i)
                return -EINVAL;

        if (i == 0) {
                offset = start_offset;
                *map_start = 0;
        } else {
                offset = 0;
                *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
        }

        if (start + min_len > eb->len) {
                printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
                       "wanted %lu %lu\n", (unsigned long long)eb->start,
                       eb->len, start, min_len);
                WARN_ON(1);
        }

        p = extent_buffer_page(eb, i);
        kaddr = kmap_atomic(p, km);
        *token = kaddr;
        *map = kaddr + offset;
        *map_len = PAGE_CACHE_SIZE - offset;
        return 0;
}
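
/*
 * Same as map_private_extent_buffer(), but if the extent buffer was
 * carrying a cached mapping (eb->map_token) that mapping is dropped
 * first and, on success, refreshed with the new one; that path expects
 * eb->mutex to be held.
 *
 * Illustrative usage sketch (hypothetical, not a caller from this
 * file), assuming the requested range fits in one page and no cached
 * mapping is being maintained on the buffer:
 *
 *        char *token;
 *        char *kaddr;
 *        unsigned long map_start;
 *        unsigned long map_len;
 *
 *        if (!map_extent_buffer(eb, offset, sizeof(u64), &token, &kaddr,
 *                               &map_start, &map_len, KM_USER1)) {
 *                ... read or write through kaddr ...
 *                unmap_extent_buffer(eb, token, KM_USER1);
 *        }
 */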
int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
                      unsigned long min_len,
                      char **token, char **map,
                      unsigned long *map_start,
                      unsigned long *map_len, int km)
{
        int err;
        int save = 0;

        if (eb->map_token) {
                unmap_extent_buffer(eb, eb->map_token, km);
                eb->map_token = NULL;
                save = 1;
                WARN_ON(!mutex_is_locked(&eb->mutex));
        }
        err = map_private_extent_buffer(eb, start, min_len, token, map,
                                        map_start, map_len, km);
        if (!err && save) {
                eb->map_token = *token;
                eb->kaddr = *map;
                eb->map_start = *map_start;
                eb->map_len = *map_len;
        }
        return err;
}
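
/* release a mapping set up by map_extent_buffer()/map_private_extent_buffer() */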
void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
        kunmap_atomic(token, km);
}
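
/*
 * Compare 'len' bytes at offset 'start' of the extent buffer against the
 * raw buffer 'ptrv'.  Returns 0 on a match, otherwise the first non-zero
 * memcmp() result.
 */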
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
                         unsigned long start,
                         unsigned long len)
{
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        char *ptr = (char *)ptrv;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
        int ret = 0;

        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);

        offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(eb, i);

                cur = min(len, (PAGE_CACHE_SIZE - offset));

                kaddr = kmap_atomic(page, KM_USER0);
                ret = memcmp(ptr, kaddr + offset, cur);
                kunmap_atomic(kaddr, KM_USER0);
                if (ret)
                        break;

                ptr += cur;
                len -= cur;
                offset = 0;
                i++;
        }
        return ret;
}
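
/*
 * Copy 'len' bytes from 'srcv' into the extent buffer at offset 'start'.
 * The pages being written are expected to already be up to date.
 */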
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
                         unsigned long start, unsigned long len)
{
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        char *src = (char *)srcv;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);

        offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(eb, i);
                WARN_ON(!PageUptodate(page));

                cur = min(len, PAGE_CACHE_SIZE - offset);
                kaddr = kmap_atomic(page, KM_USER1);
                memcpy(kaddr + offset, src, cur);
                kunmap_atomic(kaddr, KM_USER1);

                src += cur;
                len -= cur;
                offset = 0;
                i++;
        }
}
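
/* fill 'len' bytes of the extent buffer, starting at offset 'start', with 'c' */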
void memset_extent_buffer(struct extent_buffer *eb, char c,
                          unsigned long start, unsigned long len)
{
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

        WARN_ON(start > eb->len);
        WARN_ON(start + len > eb->start + eb->len);

        offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(eb, i);
                WARN_ON(!PageUptodate(page));

                cur = min(len, PAGE_CACHE_SIZE - offset);
                kaddr = kmap_atomic(page, KM_USER0);
                memset(kaddr + offset, c, cur);
                kunmap_atomic(kaddr, KM_USER0);

                len -= cur;
                offset = 0;
                i++;
        }
}
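
/*
 * Copy a range from the 'src' extent buffer into 'dst'.  The data is
 * pulled through read_extent_buffer() one destination page at a time,
 * so the two buffers need not share the same in-page alignment.
 */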
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
                        unsigned long dst_offset, unsigned long src_offset,
                        unsigned long len)
{
        u64 dst_len = dst->len;
        size_t cur;
        size_t offset;
        struct page *page;
        char *kaddr;
        size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

        WARN_ON(src->len != dst_len);

        offset = (start_offset + dst_offset) &
                 ((unsigned long)PAGE_CACHE_SIZE - 1);

        while (len > 0) {
                page = extent_buffer_page(dst, i);
                WARN_ON(!PageUptodate(page));

                cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

                kaddr = kmap_atomic(page, KM_USER0);
                read_extent_buffer(src, kaddr + offset, src_offset, cur);
                kunmap_atomic(kaddr, KM_USER0);

                src_offset += cur;
                len -= cur;
                offset = 0;
                i++;
        }
}
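
/*
 * Single-page memmove() helper.  When source and destination are the
 * same page a plain memmove() suffices; otherwise the bytes are copied
 * back to front so that the backward walk done by
 * memmove_extent_buffer() stays overlap-safe across page boundaries.
 */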
static void move_pages(struct page *dst_page, struct page *src_page,
                       unsigned long dst_off, unsigned long src_off,
                       unsigned long len)
{
        char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);

        if (dst_page == src_page) {
                memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
        } else {
                char *src_kaddr = kmap_atomic(src_page, KM_USER1);
                char *p = dst_kaddr + dst_off + len;
                char *s = src_kaddr + src_off + len;

                while (len--)
                        *--p = *--s;

                kunmap_atomic(src_kaddr, KM_USER1);
        }
        kunmap_atomic(dst_kaddr, KM_USER0);
}
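
/*
 * Single-page memcpy() helper used by memcpy_extent_buffer(); unlike
 * move_pages() it does not try to handle overlapping ranges.
 */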
static void copy_pages(struct page *dst_page, struct page *src_page,
                       unsigned long dst_off, unsigned long src_off,
                       unsigned long len)
{
        char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
        char *src_kaddr;

        if (dst_page != src_page)
                src_kaddr = kmap_atomic(src_page, KM_USER1);
        else
                src_kaddr = dst_kaddr;

        memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
        kunmap_atomic(dst_kaddr, KM_USER0);
        if (dst_page != src_page)
                kunmap_atomic(src_kaddr, KM_USER1);
}
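
/*
 * Copy 'len' bytes within one extent buffer from 'src_offset' to
 * 'dst_offset', walking front to back.  Ranges that run past the end
 * of the buffer are treated as a bug.
 */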
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                          unsigned long src_offset, unsigned long len)
{
        size_t cur;
        size_t dst_off_in_page;
        size_t src_off_in_page;
        size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long dst_i;
        unsigned long src_i;

        if (src_offset + len > dst->len) {
                printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
                       "len %lu dst len %lu\n", src_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset + len > dst->len) {
                printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
                       "len %lu dst len %lu\n", dst_offset, len, dst->len);
                BUG_ON(1);
        }

        while (len > 0) {
                dst_off_in_page = (start_offset + dst_offset) &
                        ((unsigned long)PAGE_CACHE_SIZE - 1);
                src_off_in_page = (start_offset + src_offset) &
                        ((unsigned long)PAGE_CACHE_SIZE - 1);

                dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
                src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

                cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
                                               src_off_in_page));
                cur = min_t(unsigned long, cur,
                            (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

                copy_pages(extent_buffer_page(dst, dst_i),
                           extent_buffer_page(dst, src_i),
                           dst_off_in_page, src_off_in_page, cur);

                src_offset += cur;
                dst_offset += cur;
                len -= cur;
        }
}
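
/*
 * Overlap-safe move within one extent buffer.  Moves toward lower
 * offsets are handed to memcpy_extent_buffer(); moves toward higher
 * offsets are performed back to front via move_pages().
 */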
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                           unsigned long src_offset, unsigned long len)
{
        size_t cur;
        size_t dst_off_in_page;
        size_t src_off_in_page;
        unsigned long dst_end = dst_offset + len - 1;
        unsigned long src_end = src_offset + len - 1;
        size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
        unsigned long dst_i;
        unsigned long src_i;

        if (src_offset + len > dst->len) {
                printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
                       "len %lu len %lu\n", src_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset + len > dst->len) {
                printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
                       "len %lu len %lu\n", dst_offset, len, dst->len);
                BUG_ON(1);
        }
        if (dst_offset < src_offset) {
                memcpy_extent_buffer(dst, dst_offset, src_offset, len);
                return;
        }
        while (len > 0) {
                dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
                src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

                dst_off_in_page = (start_offset + dst_end) &
                        ((unsigned long)PAGE_CACHE_SIZE - 1);
                src_off_in_page = (start_offset + src_end) &
                        ((unsigned long)PAGE_CACHE_SIZE - 1);

                cur = min_t(unsigned long, len, src_off_in_page + 1);
                cur = min(cur, dst_off_in_page + 1);
                move_pages(extent_buffer_page(dst, dst_i),
                           extent_buffer_page(dst, src_i),
                           dst_off_in_page - cur + 1,
                           src_off_in_page - cur + 1, cur);

                dst_end -= cur;
                src_end -= cur;
                len -= cur;
        }
}
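
/*
 * Called when the page cache wants to drop 'page'.  If the extent
 * buffer covering it has no extra references, release its pages and
 * free it.  Returns 1 when the page can be released, 0 when the
 * buffer is still in use.
 */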
int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
{
        u64 start = page_offset(page);
        struct extent_buffer *eb;
        int ret = 1;
        unsigned long i;
        unsigned long num_pages;

        spin_lock(&tree->buffer_lock);
        eb = buffer_search(tree, start);
        if (!eb)
                goto out;

        if (atomic_read(&eb->refs) > 1) {
                ret = 0;
                goto out;
        }
        /* at this point we can safely release the extent buffer */
        num_pages = num_extent_pages(eb->start, eb->len);
        for (i = 0; i < num_pages; i++)
                page_cache_release(extent_buffer_page(eb, i));
        rb_erase(&eb->rb_node, &tree->buffer);
        __free_extent_buffer(eb);
out:
        spin_unlock(&tree->buffer_lock);
        return ret;
}