extent_io.c

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#ifdef LEAK_DEBUG
static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	int extent_locked;
};

int __init extent_io_init(void)
{
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer), 0,
						 NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n",
		       state->start, state->end, state->state, state->tree,
		       atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk("buffer leak start %Lu len %lu refs %d\n",
		       eb->start, eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}

	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping, gfp_t mask)
{
	tree->state.rb_node = NULL;
	tree->buffer.rb_node = NULL;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_io_tree_init);

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#ifdef LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#ifdef LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#ifdef LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#ifdef LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);
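
/*
 * insert 'node' into an extent rbtree.  'offset' is compared against the
 * [start, end] ranges already in the tree; if it falls inside an existing
 * entry, that entry's node is returned and nothing is inserted.
 */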
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}
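
/*
 * the buffer rbtree is keyed by extent_buffer->start only.  These helpers
 * mirror tree_insert/tree_search for that second tree; buffer_tree_insert
 * returns an existing buffer if one already starts at 'offset'.
 */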
static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
						u64 offset, struct rb_node *node)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_buffer *eb;

	while (*p) {
		parent = *p;
		eb = rb_entry(parent, struct extent_buffer, rb_node);

		if (offset < eb->start)
			p = &(*p)->rb_left;
		else if (offset > eb->start)
			p = &(*p)->rb_right;
		else
			return eb;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
					   u64 offset)
{
	struct rb_root *root = &tree->buffer;
	struct rb_node *n = root->rb_node;
	struct extent_buffer *eb;

	while (n) {
		eb = rb_entry(n, struct extent_buffer, rb_node);
		if (offset < eb->start)
			n = n->rb_left;
		else if (offset > eb->start)
			n = n->rb_right;
		else
			return eb;
	}
	return NULL;
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->tree = NULL;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}
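
/*
 * forward bit changes on a range to the tree owner's set/clear hooks (if
 * any), so callers such as the btrfs inode can account for state
 * transitions on that range.
 */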
static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state,
			 unsigned long bits)
{
	if (tree->ops && tree->ops->set_bit_hook) {
		tree->ops->set_bit_hook(tree->mapping->host, state->start,
					state->end, state->state, bits);
	}
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned long bits)
{
	if (tree->ops && tree->ops->clear_bit_hook) {
		tree->ops->clear_bit_hook(tree->mapping->host, state->start,
					  state->end, state->state, bits);
	}
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	if (bits & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	set_state_cb(tree, state, bits);
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig:     [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start, prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->tree) {
			clear_state_cb(tree, state, state->state);
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	spin_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	spin_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&tree->lock);
	schedule();
	spin_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			spin_unlock_irq(&tree->lock);
			cond_resched();
			spin_lock_irq(&tree->lock);
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int bits)
{
	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	set_state_cb(tree, state, bits);
	state->state |= bits;
}

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		set_state_bits(tree, state, bits);
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, bits);
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, bits);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_ordered);

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_ordered);

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * either insert or lock state struct between start and end.  Use mask to
 * tell us if waiting is desired.
 */
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    gfp_t mask)
{
	int err;
	u64 failed_start;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
			     &failed_start, mask);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, mask);
		return 0;
	}
	return 1;
}
EXPORT_SYMBOL(try_lock_extent);

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

/*
 * find the first offset in the io tree with 'bits' set.  zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}
EXPORT_SYMBOL(find_first_extent_bit_state);

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
					u64 *start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found)
			*start = state->start;
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return found;
}
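
/*
 * unlock (and drop our reference on) every page in [start, end], except for
 * locked_page, which the caller keeps locked.
 */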
static noinline int __unlock_for_delalloc(struct inode *inode,
					  struct page *locked_page,
					  u64 start, u64 end)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	if (index == locked_page->index && end_index == index)
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min(nr_pages, ARRAY_SIZE(pages)),
					    pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] != locked_page)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}
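
/*
 * lock every page covering [delalloc_start, delalloc_end] except locked_page,
 * which the caller already holds.  Returns -EAGAIN (after undoing any pages
 * it locked) if a page is missing, clean, or no longer belongs to the inode.
 */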
static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 delalloc_start,
					u64 delalloc_end)
{
	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
	unsigned long start_index = index;
	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
	unsigned long pages_locked = 0;
	struct page *pages[16];
	unsigned long nrpages;
	int ret;
	int i;

	/* the caller is responsible for locking the start index */
	if (index == locked_page->index && index == end_index)
		return 0;

	/* skip the page at the start index */
	nrpages = end_index - index + 1;
	while (nrpages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min(nrpages, ARRAY_SIZE(pages)),
					    pages);
		if (ret == 0) {
			ret = -EAGAIN;
			goto done;
		}
		/* now we have an array of pages, lock them all */
		for (i = 0; i < ret; i++) {
			/*
			 * the caller is taking responsibility for
			 * locked_page
			 */
			if (pages[i] != locked_page) {
				lock_page(pages[i]);
				if (!PageDirty(pages[i]) ||
				    pages[i]->mapping != inode->i_mapping) {
					ret = -EAGAIN;
					unlock_page(pages[i]);
					page_cache_release(pages[i]);
					goto done;
				}
			}
			page_cache_release(pages[i]);
			pages_locked++;
		}
		nrpages -= ret;
		index += ret;
		cond_resched();
	}
	ret = 0;
done:
	if (ret && pages_locked) {
		__unlock_for_delalloc(inode, locked_page,
				      delalloc_start,
				      ((u64)(start_index + pages_locked - 1)) <<
				      PAGE_CACHE_SHIFT);
	}
	return ret;
}

/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range,
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
					     struct extent_io_tree *tree,
					     struct page *locked_page,
					     u64 *start, u64 *end,
					     u64 max_bytes)
{
	u64 delalloc_start;
	u64 delalloc_end;
	u64 found;
	int ret;
	int loops = 0;

again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
				    max_bytes);
	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
		return found;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 * if we're looping.
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, lets avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		if (!loops) {
			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
			max_bytes = PAGE_CACHE_SIZE - offset;
			loops = 1;
			goto again;
		} else {
			found = 0;
			goto out_failed;
		}
	}
	BUG_ON(ret);

	/* step three, lock the state bits for the whole range */
	lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, 1);
	if (!ret) {
		unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
		__unlock_for_delalloc(inode, locked_page,
				      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}
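
/*
 * clear the requested bits on [start, end] in the io tree, then walk the
 * pages in that range applying the requested page operations (clear dirty,
 * set/end writeback, unlock), skipping locked_page.
 */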
int extent_clear_unlock_delalloc(struct inode *inode,
				 struct extent_io_tree *tree,
				 u64 start, u64 end, struct page *locked_page,
				 int unlock_pages,
				 int clear_unlock,
				 int clear_delalloc, int clear_dirty,
				 int set_writeback,
				 int end_writeback)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int clear_bits = 0;

	if (clear_unlock)
		clear_bits |= EXTENT_LOCKED;

	if (clear_dirty)
		clear_bits |= EXTENT_DIRTY;

	if (clear_delalloc)
		clear_bits |= EXTENT_DELALLOC;

	clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
	if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min(nr_pages, ARRAY_SIZE(pages)),
					    pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] == locked_page) {
				page_cache_release(pages[i]);
				continue;
			}
			if (clear_dirty)
				clear_page_dirty_for_io(pages[i]);
			if (set_writeback)
				set_page_writeback(pages[i]);
			if (end_writeback)
				end_page_writeback(pages[i]);
			if (unlock_pages)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}
EXPORT_SYMBOL(extent_clear_unlock_delalloc);

/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	int found = 0;

	if (search_end <= cur_start) {
		printk("search_end %Lu start %Lu\n", search_end, cur_start);
		WARN_ON(1);
		return 0;
	}

	spin_lock_irq(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (state->end >= cur_start && (state->state & bits)) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = state->start;
				found = 1;
			}
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return total_bytes;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);
/*
 * set the private field for a given byte offset in the tree.  If there isn't
 * an extent_state that starts at that exact offset, -ENOENT is returned and
 * nothing is changed.
 */
  1363. int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
  1364. {
  1365. struct rb_node *node;
  1366. struct extent_state *state;
  1367. int ret = 0;
  1368. spin_lock_irq(&tree->lock);
  1369. /*
  1370. * this search will find all the extents that end after
  1371. * our range starts.
  1372. */
  1373. node = tree_search(tree, start);
  1374. if (!node) {
  1375. ret = -ENOENT;
  1376. goto out;
  1377. }
  1378. state = rb_entry(node, struct extent_state, rb_node);
  1379. if (state->start != start) {
  1380. ret = -ENOENT;
  1381. goto out;
  1382. }
  1383. state->private = private;
  1384. out:
  1385. spin_unlock_irq(&tree->lock);
  1386. return ret;
  1387. }
  1388. int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
  1389. {
  1390. struct rb_node *node;
  1391. struct extent_state *state;
  1392. int ret = 0;
  1393. spin_lock_irq(&tree->lock);
  1394. /*
  1395. * this search will find all the extents that end after
  1396. * our range starts.
  1397. */
  1398. node = tree_search(tree, start);
  1399. if (!node) {
  1400. ret = -ENOENT;
  1401. goto out;
  1402. }
  1403. state = rb_entry(node, struct extent_state, rb_node);
  1404. if (state->start != start) {
  1405. ret = -ENOENT;
  1406. goto out;
  1407. }
  1408. *private = state->private;
  1409. out:
  1410. spin_unlock_irq(&tree->lock);
  1411. return ret;
  1412. }
/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if the whole range is covered by
 * extents that all have the bits set.  Otherwise, 1 is returned if any
 * bit in the range is found set.
 */
  1419. int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
  1420. int bits, int filled)
  1421. {
  1422. struct extent_state *state = NULL;
  1423. struct rb_node *node;
  1424. int bitset = 0;
  1425. unsigned long flags;
  1426. spin_lock_irqsave(&tree->lock, flags);
  1427. node = tree_search(tree, start);
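/*
 * walk the states covering the range; in 'filled' mode a gap or a
 * state without the bits makes the whole test fail
 */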
  1428. while (node && start <= end) {
  1429. state = rb_entry(node, struct extent_state, rb_node);
  1430. if (filled && state->start > start) {
  1431. bitset = 0;
  1432. break;
  1433. }
  1434. if (state->start > end)
  1435. break;
  1436. if (state->state & bits) {
  1437. bitset = 1;
  1438. if (!filled)
  1439. break;
  1440. } else if (filled) {
  1441. bitset = 0;
  1442. break;
  1443. }
  1444. start = state->end + 1;
  1445. if (start > end)
  1446. break;
  1447. node = rb_next(node);
  1448. if (!node) {
  1449. if (filled)
  1450. bitset = 0;
  1451. break;
  1452. }
  1453. }
  1454. spin_unlock_irqrestore(&tree->lock, flags);
  1455. return bitset;
  1456. }
  1457. EXPORT_SYMBOL(test_range_bit);
  1458. /*
  1459. * helper function to set a given page up to date if all the
  1460. * extents in the tree for that page are up to date
  1461. */
  1462. static int check_page_uptodate(struct extent_io_tree *tree,
  1463. struct page *page)
  1464. {
  1465. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  1466. u64 end = start + PAGE_CACHE_SIZE - 1;
  1467. if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
  1468. SetPageUptodate(page);
  1469. return 0;
  1470. }
  1471. /*
  1472. * helper function to unlock a page if all the extents in the tree
  1473. * for that page are unlocked
  1474. */
  1475. static int check_page_locked(struct extent_io_tree *tree,
  1476. struct page *page)
  1477. {
  1478. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  1479. u64 end = start + PAGE_CACHE_SIZE - 1;
  1480. if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
  1481. unlock_page(page);
  1482. return 0;
  1483. }
  1484. /*
  1485. * helper function to end page writeback if all the extents
  1486. * in the tree for that page are done with writeback
  1487. */
  1488. static int check_page_writeback(struct extent_io_tree *tree,
  1489. struct page *page)
  1490. {
  1491. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  1492. u64 end = start + PAGE_CACHE_SIZE - 1;
  1493. if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
  1494. end_page_writeback(page);
  1495. return 0;
  1496. }
  1497. /* lots and lots of room for performance fixes in the end_bio funcs */
  1498. /*
  1499. * after a writepage IO is done, we need to:
  1500. * clear the uptodate bits on error
  1501. * clear the writeback bits in the extent tree for this IO
  1502. * end_page_writeback if the page has no more pending IO
  1503. *
  1504. * Scheduling is not allowed, so the extent state tree is expected
  1505. * to have one and only one object corresponding to this IO.
  1506. */
  1507. static void end_bio_extent_writepage(struct bio *bio, int err)
  1508. {
  1509. int uptodate = err == 0;
  1510. struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
  1511. struct extent_io_tree *tree;
  1512. u64 start;
  1513. u64 end;
  1514. int whole_page;
  1515. int ret;
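/* walk the bio_vec array from the last entry back toward the first */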
  1516. do {
  1517. struct page *page = bvec->bv_page;
  1518. tree = &BTRFS_I(page->mapping->host)->io_tree;
  1519. start = ((u64)page->index << PAGE_CACHE_SHIFT) +
  1520. bvec->bv_offset;
  1521. end = start + bvec->bv_len - 1;
  1522. if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
  1523. whole_page = 1;
  1524. else
  1525. whole_page = 0;
  1526. if (--bvec >= bio->bi_io_vec)
  1527. prefetchw(&bvec->bv_page->flags);
  1528. if (tree->ops && tree->ops->writepage_end_io_hook) {
  1529. ret = tree->ops->writepage_end_io_hook(page, start,
  1530. end, NULL, uptodate);
  1531. if (ret)
  1532. uptodate = 0;
  1533. }
  1534. if (!uptodate && tree->ops &&
  1535. tree->ops->writepage_io_failed_hook) {
  1536. ret = tree->ops->writepage_io_failed_hook(bio, page,
  1537. start, end, NULL);
  1538. if (ret == 0) {
  1539. uptodate = (err == 0);
  1540. continue;
  1541. }
  1542. }
  1543. if (!uptodate) {
  1544. clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
  1545. ClearPageUptodate(page);
  1546. SetPageError(page);
  1547. }
  1548. clear_extent_writeback(tree, start, end, GFP_ATOMIC);
  1549. if (whole_page)
  1550. end_page_writeback(page);
  1551. else
  1552. check_page_writeback(tree, page);
  1553. } while (bvec >= bio->bi_io_vec);
  1554. bio_put(bio);
  1555. }
  1556. /*
  1557. * after a readpage IO is done, we need to:
  1558. * clear the uptodate bits on error
  1559. * set the uptodate bits if things worked
  1560. * set the page up to date if all extents in the tree are uptodate
  1561. * clear the lock bit in the extent tree
  1562. * unlock the page if there are no other extents locked for it
  1563. *
  1564. * Scheduling is not allowed, so the extent state tree is expected
  1565. * to have one and only one object corresponding to this IO.
  1566. */
  1567. static void end_bio_extent_readpage(struct bio *bio, int err)
  1568. {
  1569. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  1570. struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
  1571. struct extent_io_tree *tree;
  1572. u64 start;
  1573. u64 end;
  1574. int whole_page;
  1575. int ret;
  1576. do {
  1577. struct page *page = bvec->bv_page;
  1578. tree = &BTRFS_I(page->mapping->host)->io_tree;
  1579. start = ((u64)page->index << PAGE_CACHE_SHIFT) +
  1580. bvec->bv_offset;
  1581. end = start + bvec->bv_len - 1;
  1582. if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
  1583. whole_page = 1;
  1584. else
  1585. whole_page = 0;
  1586. if (--bvec >= bio->bi_io_vec)
  1587. prefetchw(&bvec->bv_page->flags);
  1588. if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
  1589. ret = tree->ops->readpage_end_io_hook(page, start, end,
  1590. NULL);
  1591. if (ret)
  1592. uptodate = 0;
  1593. }
  1594. if (!uptodate && tree->ops &&
  1595. tree->ops->readpage_io_failed_hook) {
  1596. ret = tree->ops->readpage_io_failed_hook(bio, page,
  1597. start, end, NULL);
  1598. if (ret == 0) {
  1599. uptodate =
  1600. test_bit(BIO_UPTODATE, &bio->bi_flags);
  1601. continue;
  1602. }
  1603. }
  1604. if (uptodate) {
  1605. set_extent_uptodate(tree, start, end,
  1606. GFP_ATOMIC);
  1607. }
  1608. unlock_extent(tree, start, end, GFP_ATOMIC);
  1609. if (whole_page) {
  1610. if (uptodate) {
  1611. SetPageUptodate(page);
  1612. } else {
  1613. ClearPageUptodate(page);
  1614. SetPageError(page);
  1615. }
  1616. unlock_page(page);
  1617. } else {
  1618. if (uptodate) {
  1619. check_page_uptodate(tree, page);
  1620. } else {
  1621. ClearPageUptodate(page);
  1622. SetPageError(page);
  1623. }
  1624. check_page_locked(tree, page);
  1625. }
  1626. } while (bvec >= bio->bi_io_vec);
  1627. bio_put(bio);
  1628. }
/*
 * IO done from prepare_write is pretty simple: we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
  1634. static void end_bio_extent_preparewrite(struct bio *bio, int err)
  1635. {
  1636. const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  1637. struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
  1638. struct extent_io_tree *tree;
  1639. u64 start;
  1640. u64 end;
  1641. do {
  1642. struct page *page = bvec->bv_page;
  1643. tree = &BTRFS_I(page->mapping->host)->io_tree;
  1644. start = ((u64)page->index << PAGE_CACHE_SHIFT) +
  1645. bvec->bv_offset;
  1646. end = start + bvec->bv_len - 1;
  1647. if (--bvec >= bio->bi_io_vec)
  1648. prefetchw(&bvec->bv_page->flags);
  1649. if (uptodate) {
  1650. set_extent_uptodate(tree, start, end, GFP_ATOMIC);
  1651. } else {
  1652. ClearPageUptodate(page);
  1653. SetPageError(page);
  1654. }
  1655. unlock_extent(tree, start, end, GFP_ATOMIC);
  1656. } while (bvec >= bio->bi_io_vec);
  1657. bio_put(bio);
  1658. }
  1659. static struct bio *
  1660. extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
  1661. gfp_t gfp_flags)
  1662. {
  1663. struct bio *bio;
  1664. bio = bio_alloc(gfp_flags, nr_vecs);
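/* if allocation fails for a memalloc task, retry with fewer and fewer vecs before giving up */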
  1665. if (bio == NULL && (current->flags & PF_MEMALLOC)) {
  1666. while (!bio && (nr_vecs /= 2))
  1667. bio = bio_alloc(gfp_flags, nr_vecs);
  1668. }
  1669. if (bio) {
  1670. bio->bi_size = 0;
  1671. bio->bi_bdev = bdev;
  1672. bio->bi_sector = first_sector;
  1673. }
  1674. return bio;
  1675. }
  1676. static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
  1677. unsigned long bio_flags)
  1678. {
  1679. int ret = 0;
  1680. struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
  1681. struct page *page = bvec->bv_page;
  1682. struct extent_io_tree *tree = bio->bi_private;
  1683. u64 start;
  1684. u64 end;
  1685. start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
  1686. end = start + bvec->bv_len - 1;
  1687. bio->bi_private = NULL;
  1688. bio_get(bio);
  1689. if (tree->ops && tree->ops->submit_bio_hook)
  1690. tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
  1691. mirror_num, bio_flags);
  1692. else
  1693. submit_bio(rw, bio);
  1694. if (bio_flagged(bio, BIO_EOPNOTSUPP))
  1695. ret = -EOPNOTSUPP;
  1696. bio_put(bio);
  1697. return ret;
  1698. }
  1699. static int submit_extent_page(int rw, struct extent_io_tree *tree,
  1700. struct page *page, sector_t sector,
  1701. size_t size, unsigned long offset,
  1702. struct block_device *bdev,
  1703. struct bio **bio_ret,
  1704. unsigned long max_pages,
  1705. bio_end_io_t end_io_func,
  1706. int mirror_num,
  1707. unsigned long prev_bio_flags,
  1708. unsigned long bio_flags)
  1709. {
  1710. int ret = 0;
  1711. struct bio *bio;
  1712. int nr;
  1713. int contig = 0;
  1714. int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
  1715. int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
  1716. size_t page_size = min(size, PAGE_CACHE_SIZE);
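/*
 * try to tack this page onto the bio the caller is building.  If the
 * sector isn't contiguous, the compression flags changed, the merge
 * hook objects or bio_add_page can't fit it, submit the old bio and
 * start a new one below.
 */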
  1717. if (bio_ret && *bio_ret) {
  1718. bio = *bio_ret;
  1719. if (old_compressed)
  1720. contig = bio->bi_sector == sector;
  1721. else
  1722. contig = bio->bi_sector + (bio->bi_size >> 9) ==
  1723. sector;
  1724. if (prev_bio_flags != bio_flags || !contig ||
  1725. (tree->ops && tree->ops->merge_bio_hook &&
  1726. tree->ops->merge_bio_hook(page, offset, page_size, bio,
  1727. bio_flags)) ||
  1728. bio_add_page(bio, page, page_size, offset) < page_size) {
  1729. ret = submit_one_bio(rw, bio, mirror_num,
  1730. prev_bio_flags);
  1731. bio = NULL;
  1732. } else {
  1733. return 0;
  1734. }
  1735. }
  1736. if (this_compressed)
  1737. nr = BIO_MAX_PAGES;
  1738. else
  1739. nr = bio_get_nr_vecs(bdev);
  1740. bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
if (!bio) {
	printk("failed to allocate bio nr %d\n", nr);
	if (bio_ret)
		*bio_ret = NULL; /* don't hand back a stale, already submitted bio */
	return -ENOMEM;
}
  1744. bio_add_page(bio, page, page_size, offset);
  1745. bio->bi_end_io = end_io_func;
  1746. bio->bi_private = tree;
  1747. if (bio_ret) {
  1748. *bio_ret = bio;
  1749. } else {
  1750. ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
  1751. }
  1752. return ret;
  1753. }
  1754. void set_page_extent_mapped(struct page *page)
  1755. {
  1756. if (!PagePrivate(page)) {
  1757. SetPagePrivate(page);
  1758. page_cache_get(page);
  1759. set_page_private(page, EXTENT_PAGE_PRIVATE);
  1760. }
  1761. }
  1762. EXPORT_SYMBOL(set_page_extent_mapped);
  1763. void set_page_extent_head(struct page *page, unsigned long len)
  1764. {
  1765. set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
  1766. }
/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree; they are removed when the IO is done (by the end_io
 * handlers).
 */
  1772. static int __extent_read_full_page(struct extent_io_tree *tree,
  1773. struct page *page,
  1774. get_extent_t *get_extent,
  1775. struct bio **bio, int mirror_num,
  1776. unsigned long *bio_flags)
  1777. {
  1778. struct inode *inode = page->mapping->host;
  1779. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  1780. u64 page_end = start + PAGE_CACHE_SIZE - 1;
  1781. u64 end;
  1782. u64 cur = start;
  1783. u64 extent_offset;
  1784. u64 last_byte = i_size_read(inode);
  1785. u64 block_start;
  1786. u64 cur_end;
  1787. sector_t sector;
  1788. struct extent_map *em;
  1789. struct block_device *bdev;
  1790. int ret;
  1791. int nr = 0;
  1792. size_t page_offset = 0;
  1793. size_t iosize;
  1794. size_t disk_io_size;
  1795. size_t blocksize = inode->i_sb->s_blocksize;
  1796. unsigned long this_bio_flag = 0;
  1797. set_page_extent_mapped(page);
  1798. end = page_end;
  1799. lock_extent(tree, start, end, GFP_NOFS);
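/* if this is the last page of the file, zero the part beyond i_size */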
  1800. if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
  1801. char *userpage;
  1802. size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
  1803. if (zero_offset) {
  1804. iosize = PAGE_CACHE_SIZE - zero_offset;
  1805. userpage = kmap_atomic(page, KM_USER0);
  1806. memset(userpage + zero_offset, 0, iosize);
  1807. flush_dcache_page(page);
  1808. kunmap_atomic(userpage, KM_USER0);
  1809. }
  1810. }
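/*
 * walk the page one extent mapping at a time: zero holes, skip ranges
 * that are already uptodate and submit READ bios for the rest
 */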
  1811. while (cur <= end) {
  1812. if (cur >= last_byte) {
  1813. char *userpage;
  1814. iosize = PAGE_CACHE_SIZE - page_offset;
  1815. userpage = kmap_atomic(page, KM_USER0);
  1816. memset(userpage + page_offset, 0, iosize);
  1817. flush_dcache_page(page);
  1818. kunmap_atomic(userpage, KM_USER0);
  1819. set_extent_uptodate(tree, cur, cur + iosize - 1,
  1820. GFP_NOFS);
  1821. unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
  1822. break;
  1823. }
  1824. em = get_extent(inode, page, page_offset, cur,
  1825. end - cur + 1, 0);
  1826. if (IS_ERR(em) || !em) {
  1827. SetPageError(page);
  1828. unlock_extent(tree, cur, end, GFP_NOFS);
  1829. break;
  1830. }
  1831. extent_offset = cur - em->start;
  1832. if (extent_map_end(em) <= cur) {
printk("bad mapping em [%Lu %Lu] cur %Lu\n",
       em->start, extent_map_end(em), cur);
  1834. }
  1835. BUG_ON(extent_map_end(em) <= cur);
  1836. if (end < cur) {
  1837. printk("2bad mapping end %Lu cur %Lu\n", end, cur);
  1838. }
  1839. BUG_ON(end < cur);
  1840. if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
  1841. this_bio_flag = EXTENT_BIO_COMPRESSED;
  1842. iosize = min(extent_map_end(em) - cur, end - cur + 1);
  1843. cur_end = min(extent_map_end(em) - 1, end);
  1844. iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
  1845. if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
  1846. disk_io_size = em->block_len;
  1847. sector = em->block_start >> 9;
  1848. } else {
  1849. sector = (em->block_start + extent_offset) >> 9;
  1850. disk_io_size = iosize;
  1851. }
  1852. bdev = em->bdev;
  1853. block_start = em->block_start;
  1854. if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
  1855. block_start = EXTENT_MAP_HOLE;
  1856. free_extent_map(em);
  1857. em = NULL;
  1858. /* we've found a hole, just zero and go on */
  1859. if (block_start == EXTENT_MAP_HOLE) {
  1860. char *userpage;
  1861. userpage = kmap_atomic(page, KM_USER0);
  1862. memset(userpage + page_offset, 0, iosize);
  1863. flush_dcache_page(page);
  1864. kunmap_atomic(userpage, KM_USER0);
  1865. set_extent_uptodate(tree, cur, cur + iosize - 1,
  1866. GFP_NOFS);
  1867. unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
  1868. cur = cur + iosize;
  1869. page_offset += iosize;
  1870. continue;
  1871. }
  1872. /* the get_extent function already copied into the page */
  1873. if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
  1874. check_page_uptodate(tree, page);
  1875. unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
  1876. cur = cur + iosize;
  1877. page_offset += iosize;
  1878. continue;
  1879. }
  1880. /* we have an inline extent but it didn't get marked up
  1881. * to date. Error out
  1882. */
  1883. if (block_start == EXTENT_MAP_INLINE) {
  1884. SetPageError(page);
  1885. unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
  1886. cur = cur + iosize;
  1887. page_offset += iosize;
  1888. continue;
  1889. }
  1890. ret = 0;
  1891. if (tree->ops && tree->ops->readpage_io_hook) {
  1892. ret = tree->ops->readpage_io_hook(page, cur,
  1893. cur + iosize - 1);
  1894. }
  1895. if (!ret) {
  1896. unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
  1897. pnr -= page->index;
  1898. ret = submit_extent_page(READ, tree, page,
  1899. sector, disk_io_size, page_offset,
  1900. bdev, bio, pnr,
  1901. end_bio_extent_readpage, mirror_num,
  1902. *bio_flags,
  1903. this_bio_flag);
  1904. nr++;
  1905. *bio_flags = this_bio_flag;
  1906. }
  1907. if (ret)
  1908. SetPageError(page);
  1909. cur = cur + iosize;
  1910. page_offset += iosize;
  1911. }
  1912. if (!nr) {
  1913. if (!PageError(page))
  1914. SetPageUptodate(page);
  1915. unlock_page(page);
  1916. }
  1917. return 0;
  1918. }
  1919. int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
  1920. get_extent_t *get_extent)
  1921. {
  1922. struct bio *bio = NULL;
  1923. unsigned long bio_flags = 0;
  1924. int ret;
  1925. ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
  1926. &bio_flags);
  1927. if (bio)
  1928. submit_one_bio(READ, bio, 0, bio_flags);
  1929. return ret;
  1930. }
  1931. EXPORT_SYMBOL(extent_read_full_page);
  1932. /*
  1933. * the writepage semantics are similar to regular writepage. extent
  1934. * records are inserted to lock ranges in the tree, and as dirty areas
  1935. * are found, they are marked writeback. Then the lock bits are removed
  1936. * and the end_io handler clears the writeback ranges
  1937. */
  1938. static int __extent_writepage(struct page *page, struct writeback_control *wbc,
  1939. void *data)
  1940. {
  1941. struct inode *inode = page->mapping->host;
  1942. struct extent_page_data *epd = data;
  1943. struct extent_io_tree *tree = epd->tree;
  1944. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  1945. u64 delalloc_start;
  1946. u64 page_end = start + PAGE_CACHE_SIZE - 1;
  1947. u64 end;
  1948. u64 cur = start;
  1949. u64 extent_offset;
  1950. u64 last_byte = i_size_read(inode);
  1951. u64 block_start;
  1952. u64 iosize;
  1953. u64 unlock_start;
  1954. sector_t sector;
  1955. struct extent_map *em;
  1956. struct block_device *bdev;
  1957. int ret;
  1958. int nr = 0;
  1959. size_t pg_offset = 0;
  1960. size_t blocksize;
  1961. loff_t i_size = i_size_read(inode);
  1962. unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
  1963. u64 nr_delalloc;
  1964. u64 delalloc_end;
  1965. int page_started;
  1966. int compressed;
  1967. unsigned long nr_written = 0;
  1968. WARN_ON(!PageLocked(page));
  1969. pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
  1970. if (page->index > end_index ||
  1971. (page->index == end_index && !pg_offset)) {
  1972. page->mapping->a_ops->invalidatepage(page, 0);
  1973. unlock_page(page);
  1974. return 0;
  1975. }
  1976. if (page->index == end_index) {
  1977. char *userpage;
  1978. userpage = kmap_atomic(page, KM_USER0);
  1979. memset(userpage + pg_offset, 0,
  1980. PAGE_CACHE_SIZE - pg_offset);
  1981. kunmap_atomic(userpage, KM_USER0);
  1982. flush_dcache_page(page);
  1983. }
  1984. pg_offset = 0;
  1985. set_page_extent_mapped(page);
  1986. delalloc_start = start;
  1987. delalloc_end = 0;
  1988. page_started = 0;
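/*
 * unless the caller already locked the range, find any delalloc
 * ranges covering this page and let fill_delalloc get them started
 * before we write the page itself
 */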
  1989. if (!epd->extent_locked) {
  1990. while(delalloc_end < page_end) {
  1991. nr_delalloc = find_lock_delalloc_range(inode, tree,
  1992. page,
  1993. &delalloc_start,
  1994. &delalloc_end,
  1995. 128 * 1024 * 1024);
  1996. if (nr_delalloc == 0) {
  1997. delalloc_start = delalloc_end + 1;
  1998. continue;
  1999. }
  2000. tree->ops->fill_delalloc(inode, page, delalloc_start,
  2001. delalloc_end, &page_started,
  2002. &nr_written);
  2003. delalloc_start = delalloc_end + 1;
  2004. }
  2005. /* did the fill delalloc function already unlock and start
  2006. * the IO?
  2007. */
  2008. if (page_started) {
  2009. ret = 0;
  2010. goto update_nr_written;
  2011. }
  2012. }
  2013. lock_extent(tree, start, page_end, GFP_NOFS);
  2014. unlock_start = start;
  2015. if (tree->ops && tree->ops->writepage_start_hook) {
  2016. ret = tree->ops->writepage_start_hook(page, start,
  2017. page_end);
  2018. if (ret == -EAGAIN) {
  2019. unlock_extent(tree, start, page_end, GFP_NOFS);
  2020. redirty_page_for_writepage(wbc, page);
  2021. unlock_page(page);
  2022. ret = 0;
  2023. goto update_nr_written;
  2024. }
  2025. }
  2026. nr_written++;
  2027. end = page_end;
  2028. if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
  2029. printk("found delalloc bits after lock_extent\n");
  2030. }
  2031. if (last_byte <= start) {
  2032. clear_extent_dirty(tree, start, page_end, GFP_NOFS);
  2033. unlock_extent(tree, start, page_end, GFP_NOFS);
  2034. if (tree->ops && tree->ops->writepage_end_io_hook)
  2035. tree->ops->writepage_end_io_hook(page, start,
  2036. page_end, NULL, 1);
  2037. unlock_start = page_end + 1;
  2038. goto done;
  2039. }
  2040. set_extent_uptodate(tree, start, page_end, GFP_NOFS);
  2041. blocksize = inode->i_sb->s_blocksize;
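/*
 * walk the page one extent mapping at a time, clearing the dirty bits
 * and submitting WRITE bios for the ordinary extents
 */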
  2042. while (cur <= end) {
  2043. if (cur >= last_byte) {
  2044. clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
  2045. unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
  2046. if (tree->ops && tree->ops->writepage_end_io_hook)
  2047. tree->ops->writepage_end_io_hook(page, cur,
  2048. page_end, NULL, 1);
  2049. unlock_start = page_end + 1;
  2050. break;
  2051. }
  2052. em = epd->get_extent(inode, page, pg_offset, cur,
  2053. end - cur + 1, 1);
  2054. if (IS_ERR(em) || !em) {
  2055. SetPageError(page);
  2056. break;
  2057. }
  2058. extent_offset = cur - em->start;
  2059. BUG_ON(extent_map_end(em) <= cur);
  2060. BUG_ON(end < cur);
  2061. iosize = min(extent_map_end(em) - cur, end - cur + 1);
  2062. iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
  2063. sector = (em->block_start + extent_offset) >> 9;
  2064. bdev = em->bdev;
  2065. block_start = em->block_start;
  2066. compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
  2067. free_extent_map(em);
  2068. em = NULL;
  2069. /*
  2070. * compressed and inline extents are written through other
  2071. * paths in the FS
  2072. */
  2073. if (compressed || block_start == EXTENT_MAP_HOLE ||
  2074. block_start == EXTENT_MAP_INLINE) {
  2075. clear_extent_dirty(tree, cur,
  2076. cur + iosize - 1, GFP_NOFS);
  2077. unlock_extent(tree, unlock_start, cur + iosize -1,
  2078. GFP_NOFS);
  2079. /*
  2080. * end_io notification does not happen here for
  2081. * compressed extents
  2082. */
  2083. if (!compressed && tree->ops &&
  2084. tree->ops->writepage_end_io_hook)
  2085. tree->ops->writepage_end_io_hook(page, cur,
  2086. cur + iosize - 1,
  2087. NULL, 1);
  2088. else if (compressed) {
  2089. /* we don't want to end_page_writeback on
  2090. * a compressed extent. this happens
  2091. * elsewhere
  2092. */
  2093. nr++;
  2094. }
  2095. cur += iosize;
  2096. pg_offset += iosize;
  2097. unlock_start = cur;
  2098. continue;
  2099. }
  2100. /* leave this out until we have a page_mkwrite call */
  2101. if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
  2102. EXTENT_DIRTY, 0)) {
  2103. cur = cur + iosize;
  2104. pg_offset += iosize;
  2105. continue;
  2106. }
  2107. clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
  2108. if (tree->ops && tree->ops->writepage_io_hook) {
  2109. ret = tree->ops->writepage_io_hook(page, cur,
  2110. cur + iosize - 1);
  2111. } else {
  2112. ret = 0;
  2113. }
  2114. if (ret) {
  2115. SetPageError(page);
  2116. } else {
  2117. unsigned long max_nr = end_index + 1;
  2118. set_range_writeback(tree, cur, cur + iosize - 1);
  2119. if (!PageWriteback(page)) {
  2120. printk("warning page %lu not writeback, "
  2121. "cur %llu end %llu\n", page->index,
  2122. (unsigned long long)cur,
  2123. (unsigned long long)end);
  2124. }
  2125. ret = submit_extent_page(WRITE, tree, page, sector,
  2126. iosize, pg_offset, bdev,
  2127. &epd->bio, max_nr,
  2128. end_bio_extent_writepage,
  2129. 0, 0, 0);
  2130. if (ret)
  2131. SetPageError(page);
  2132. }
  2133. cur = cur + iosize;
  2134. pg_offset += iosize;
  2135. nr++;
  2136. }
  2137. done:
  2138. if (nr == 0) {
  2139. /* make sure the mapping tag for page dirty gets cleared */
  2140. set_page_writeback(page);
  2141. end_page_writeback(page);
  2142. }
  2143. if (unlock_start <= page_end)
  2144. unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
  2145. unlock_page(page);
  2146. update_nr_written:
  2147. wbc->nr_to_write -= nr_written;
  2148. if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
  2149. wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
  2150. page->mapping->writeback_index = page->index + nr_written;
  2151. return 0;
  2152. }
  2153. /**
  2154. * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
  2155. * @mapping: address space structure to write
  2156. * @wbc: subtract the number of written pages from *@wbc->nr_to_write
  2157. * @writepage: function called for each page
  2158. * @data: data passed to writepage function
  2159. *
  2160. * If a page is already under I/O, write_cache_pages() skips it, even
  2161. * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
  2162. * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
  2163. * and msync() need to guarantee that all the data which was dirty at the time
  2164. * the call was made get new I/O started against them. If wbc->sync_mode is
  2165. * WB_SYNC_ALL then we were called for data integrity and we must wait for
  2166. * existing IO to complete.
  2167. */
  2168. int extent_write_cache_pages(struct extent_io_tree *tree,
  2169. struct address_space *mapping,
  2170. struct writeback_control *wbc,
  2171. writepage_t writepage, void *data)
  2172. {
  2173. struct backing_dev_info *bdi = mapping->backing_dev_info;
  2174. int ret = 0;
  2175. int done = 0;
  2176. struct pagevec pvec;
  2177. int nr_pages;
  2178. pgoff_t index;
  2179. pgoff_t end; /* Inclusive */
  2180. int scanned = 0;
  2181. int range_whole = 0;
  2182. if (wbc->nonblocking && bdi_write_congested(bdi)) {
  2183. wbc->encountered_congestion = 1;
  2184. return 0;
  2185. }
  2186. pagevec_init(&pvec, 0);
  2187. if (wbc->range_cyclic) {
  2188. index = mapping->writeback_index; /* Start from prev offset */
  2189. end = -1;
  2190. } else {
  2191. index = wbc->range_start >> PAGE_CACHE_SHIFT;
  2192. end = wbc->range_end >> PAGE_CACHE_SHIFT;
  2193. if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
  2194. range_whole = 1;
  2195. scanned = 1;
  2196. }
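/*
 * scan the dirty pages in pagevec sized batches, stopping early on
 * congestion or once nr_to_write is used up
 */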
  2197. retry:
  2198. while (!done && (index <= end) &&
  2199. (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
  2200. PAGECACHE_TAG_DIRTY,
  2201. min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
  2202. unsigned i;
  2203. scanned = 1;
  2204. for (i = 0; i < nr_pages; i++) {
  2205. struct page *page = pvec.pages[i];
  2206. /*
  2207. * At this point we hold neither mapping->tree_lock nor
  2208. * lock on the page itself: the page may be truncated or
  2209. * invalidated (changing page->mapping to NULL), or even
  2210. * swizzled back from swapper_space to tmpfs file
  2211. * mapping
  2212. */
  2213. if (tree->ops && tree->ops->write_cache_pages_lock_hook)
  2214. tree->ops->write_cache_pages_lock_hook(page);
  2215. else
  2216. lock_page(page);
  2217. if (unlikely(page->mapping != mapping)) {
  2218. unlock_page(page);
  2219. continue;
  2220. }
  2221. if (!wbc->range_cyclic && page->index > end) {
  2222. done = 1;
  2223. unlock_page(page);
  2224. continue;
  2225. }
  2226. if (wbc->sync_mode != WB_SYNC_NONE)
  2227. wait_on_page_writeback(page);
  2228. if (PageWriteback(page) ||
  2229. !clear_page_dirty_for_io(page)) {
  2230. unlock_page(page);
  2231. continue;
  2232. }
  2233. ret = (*writepage)(page, wbc, data);
  2234. if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
  2235. unlock_page(page);
  2236. ret = 0;
  2237. }
  2238. if (ret || wbc->nr_to_write <= 0)
  2239. done = 1;
  2240. if (wbc->nonblocking && bdi_write_congested(bdi)) {
  2241. wbc->encountered_congestion = 1;
  2242. done = 1;
  2243. }
  2244. }
  2245. pagevec_release(&pvec);
  2246. cond_resched();
  2247. }
  2248. if (!scanned && !done) {
  2249. /*
  2250. * We hit the last page and there is more work to be done: wrap
  2251. * back to the start of the file
  2252. */
  2253. scanned = 1;
  2254. index = 0;
  2255. goto retry;
  2256. }
  2257. return ret;
  2258. }
  2259. EXPORT_SYMBOL(extent_write_cache_pages);
  2260. int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
  2261. get_extent_t *get_extent,
  2262. struct writeback_control *wbc)
  2263. {
  2264. int ret;
  2265. struct address_space *mapping = page->mapping;
  2266. struct extent_page_data epd = {
  2267. .bio = NULL,
  2268. .tree = tree,
  2269. .get_extent = get_extent,
  2270. .extent_locked = 0,
  2271. };
  2272. struct writeback_control wbc_writepages = {
  2273. .bdi = wbc->bdi,
  2274. .sync_mode = WB_SYNC_NONE,
  2275. .older_than_this = NULL,
  2276. .nr_to_write = 64,
  2277. .range_start = page_offset(page) + PAGE_CACHE_SIZE,
  2278. .range_end = (loff_t)-1,
  2279. };
  2280. ret = __extent_writepage(page, wbc, &epd);
  2281. extent_write_cache_pages(tree, mapping, &wbc_writepages,
  2282. __extent_writepage, &epd);
  2283. if (epd.bio) {
  2284. submit_one_bio(WRITE, epd.bio, 0, 0);
  2285. }
  2286. return ret;
  2287. }
  2288. EXPORT_SYMBOL(extent_write_full_page);
  2289. int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
  2290. u64 start, u64 end, get_extent_t *get_extent,
  2291. int mode)
  2292. {
  2293. int ret = 0;
  2294. struct address_space *mapping = inode->i_mapping;
  2295. struct page *page;
  2296. unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
  2297. PAGE_CACHE_SHIFT;
  2298. struct extent_page_data epd = {
  2299. .bio = NULL,
  2300. .tree = tree,
  2301. .get_extent = get_extent,
  2302. .extent_locked = 1,
  2303. };
  2304. struct writeback_control wbc_writepages = {
  2305. .bdi = inode->i_mapping->backing_dev_info,
  2306. .sync_mode = mode,
  2307. .older_than_this = NULL,
  2308. .nr_to_write = nr_pages * 2,
  2309. .range_start = start,
  2310. .range_end = end + 1,
  2311. };
  2312. while(start <= end) {
  2313. page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
  2314. if (clear_page_dirty_for_io(page))
  2315. ret = __extent_writepage(page, &wbc_writepages, &epd);
  2316. else {
  2317. if (tree->ops && tree->ops->writepage_end_io_hook)
  2318. tree->ops->writepage_end_io_hook(page, start,
  2319. start + PAGE_CACHE_SIZE - 1,
  2320. NULL, 1);
  2321. unlock_page(page);
  2322. }
  2323. page_cache_release(page);
  2324. start += PAGE_CACHE_SIZE;
  2325. }
  2326. if (epd.bio)
  2327. submit_one_bio(WRITE, epd.bio, 0, 0);
  2328. return ret;
  2329. }
  2330. EXPORT_SYMBOL(extent_write_locked_range);
  2331. int extent_writepages(struct extent_io_tree *tree,
  2332. struct address_space *mapping,
  2333. get_extent_t *get_extent,
  2334. struct writeback_control *wbc)
  2335. {
  2336. int ret = 0;
  2337. struct extent_page_data epd = {
  2338. .bio = NULL,
  2339. .tree = tree,
  2340. .get_extent = get_extent,
  2341. .extent_locked = 0,
  2342. };
  2343. ret = extent_write_cache_pages(tree, mapping, wbc,
  2344. __extent_writepage, &epd);
  2345. if (epd.bio) {
  2346. submit_one_bio(WRITE, epd.bio, 0, 0);
  2347. }
  2348. return ret;
  2349. }
  2350. EXPORT_SYMBOL(extent_writepages);
  2351. int extent_readpages(struct extent_io_tree *tree,
  2352. struct address_space *mapping,
  2353. struct list_head *pages, unsigned nr_pages,
  2354. get_extent_t get_extent)
  2355. {
  2356. struct bio *bio = NULL;
  2357. unsigned page_idx;
  2358. struct pagevec pvec;
  2359. unsigned long bio_flags = 0;
  2360. pagevec_init(&pvec, 0);
  2361. for (page_idx = 0; page_idx < nr_pages; page_idx++) {
  2362. struct page *page = list_entry(pages->prev, struct page, lru);
  2363. prefetchw(&page->flags);
  2364. list_del(&page->lru);
  2365. /*
  2366. * what we want to do here is call add_to_page_cache_lru,
  2367. * but that isn't exported, so we reproduce it here
  2368. */
  2369. if (!add_to_page_cache(page, mapping,
  2370. page->index, GFP_KERNEL)) {
  2371. /* open coding of lru_cache_add, also not exported */
  2372. page_cache_get(page);
  2373. if (!pagevec_add(&pvec, page))
  2374. __pagevec_lru_add(&pvec);
  2375. __extent_read_full_page(tree, page, get_extent,
  2376. &bio, 0, &bio_flags);
  2377. }
  2378. page_cache_release(page);
  2379. }
  2380. if (pagevec_count(&pvec))
  2381. __pagevec_lru_add(&pvec);
  2382. BUG_ON(!list_empty(pages));
  2383. if (bio)
  2384. submit_one_bio(READ, bio, 0, bio_flags);
  2385. return 0;
  2386. }
  2387. EXPORT_SYMBOL(extent_readpages);
/*
 * basic invalidatepage code.  This waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree.
 */
  2393. int extent_invalidatepage(struct extent_io_tree *tree,
  2394. struct page *page, unsigned long offset)
  2395. {
  2396. u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
  2397. u64 end = start + PAGE_CACHE_SIZE - 1;
  2398. size_t blocksize = page->mapping->host->i_sb->s_blocksize;
start += (offset + blocksize - 1) & ~(blocksize - 1);
  2400. if (start > end)
  2401. return 0;
  2402. lock_extent(tree, start, end, GFP_NOFS);
  2403. wait_on_extent_writeback(tree, start, end);
  2404. clear_extent_bit(tree, start, end,
  2405. EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
  2406. 1, 1, GFP_NOFS);
  2407. return 0;
  2408. }
  2409. EXPORT_SYMBOL(extent_invalidatepage);
/*
 * simple commit_write call.  The page is marked dirty, and i_size is
 * updated if the write extended past the old end of file.
 */
  2414. int extent_commit_write(struct extent_io_tree *tree,
  2415. struct inode *inode, struct page *page,
  2416. unsigned from, unsigned to)
  2417. {
  2418. loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
  2419. set_page_extent_mapped(page);
  2420. set_page_dirty(page);
  2421. if (pos > inode->i_size) {
  2422. i_size_write(inode, pos);
  2423. mark_inode_dirty(inode);
  2424. }
  2425. return 0;
  2426. }
  2427. EXPORT_SYMBOL(extent_commit_write);
  2428. int extent_prepare_write(struct extent_io_tree *tree,
  2429. struct inode *inode, struct page *page,
  2430. unsigned from, unsigned to, get_extent_t *get_extent)
  2431. {
  2432. u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
  2433. u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
  2434. u64 block_start;
  2435. u64 orig_block_start;
  2436. u64 block_end;
  2437. u64 cur_end;
  2438. struct extent_map *em;
  2439. unsigned blocksize = 1 << inode->i_blkbits;
  2440. size_t page_offset = 0;
  2441. size_t block_off_start;
  2442. size_t block_off_end;
  2443. int err = 0;
  2444. int iocount = 0;
  2445. int ret = 0;
  2446. int isnew;
  2447. set_page_extent_mapped(page);
  2448. block_start = (page_start + from) & ~((u64)blocksize - 1);
  2449. block_end = (page_start + to - 1) | (blocksize - 1);
  2450. orig_block_start = block_start;
  2451. lock_extent(tree, page_start, page_end, GFP_NOFS);
  2452. while(block_start <= block_end) {
  2453. em = get_extent(inode, page, page_offset, block_start,
  2454. block_end - block_start + 1, 1);
  2455. if (IS_ERR(em) || !em) {
  2456. goto err;
  2457. }
  2458. cur_end = min(block_end, extent_map_end(em) - 1);
  2459. block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
  2460. block_off_end = block_off_start + blocksize;
  2461. isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
  2462. if (!PageUptodate(page) && isnew &&
  2463. (block_off_end > to || block_off_start < from)) {
  2464. void *kaddr;
  2465. kaddr = kmap_atomic(page, KM_USER0);
  2466. if (block_off_end > to)
  2467. memset(kaddr + to, 0, block_off_end - to);
  2468. if (block_off_start < from)
  2469. memset(kaddr + block_off_start, 0,
  2470. from - block_off_start);
  2471. flush_dcache_page(page);
  2472. kunmap_atomic(kaddr, KM_USER0);
  2473. }
  2474. if ((em->block_start != EXTENT_MAP_HOLE &&
  2475. em->block_start != EXTENT_MAP_INLINE) &&
  2476. !isnew && !PageUptodate(page) &&
  2477. (block_off_end > to || block_off_start < from) &&
  2478. !test_range_bit(tree, block_start, cur_end,
  2479. EXTENT_UPTODATE, 1)) {
  2480. u64 sector;
  2481. u64 extent_offset = block_start - em->start;
  2482. size_t iosize;
  2483. sector = (em->block_start + extent_offset) >> 9;
  2484. iosize = (cur_end - block_start + blocksize) &
  2485. ~((u64)blocksize - 1);
  2486. /*
  2487. * we've already got the extent locked, but we
  2488. * need to split the state such that our end_bio
  2489. * handler can clear the lock.
  2490. */
  2491. set_extent_bit(tree, block_start,
  2492. block_start + iosize - 1,
  2493. EXTENT_LOCKED, 0, NULL, GFP_NOFS);
  2494. ret = submit_extent_page(READ, tree, page,
  2495. sector, iosize, page_offset, em->bdev,
  2496. NULL, 1,
  2497. end_bio_extent_preparewrite, 0,
  2498. 0, 0);
  2499. iocount++;
  2500. block_start = block_start + iosize;
  2501. } else {
  2502. set_extent_uptodate(tree, block_start, cur_end,
  2503. GFP_NOFS);
  2504. unlock_extent(tree, block_start, cur_end, GFP_NOFS);
  2505. block_start = cur_end + 1;
  2506. }
  2507. page_offset = block_start & (PAGE_CACHE_SIZE - 1);
  2508. free_extent_map(em);
  2509. }
  2510. if (iocount) {
  2511. wait_extent_bit(tree, orig_block_start,
  2512. block_end, EXTENT_LOCKED);
  2513. }
  2514. check_page_uptodate(tree, page);
  2515. err:
  2516. /* FIXME, zero out newly allocated blocks on error */
  2517. return err;
  2518. }
  2519. EXPORT_SYMBOL(extent_prepare_write);
/*
 * a helper for releasepage.  This tests for areas of the page that
 * are locked or under IO and drops the related state bits if it is safe
 * to drop the page.
 */
  2525. int try_release_extent_state(struct extent_map_tree *map,
  2526. struct extent_io_tree *tree, struct page *page,
  2527. gfp_t mask)
  2528. {
  2529. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  2530. u64 end = start + PAGE_CACHE_SIZE - 1;
  2531. int ret = 1;
  2532. if (test_range_bit(tree, start, end,
  2533. EXTENT_IOBITS | EXTENT_ORDERED, 0))
  2534. ret = 0;
  2535. else {
  2536. if ((mask & GFP_NOFS) == GFP_NOFS)
  2537. mask = GFP_NOFS;
  2538. clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
  2539. 1, 1, mask);
  2540. }
  2541. return ret;
  2542. }
  2543. EXPORT_SYMBOL(try_release_extent_state);
  2544. /*
  2545. * a helper for releasepage. As long as there are no locked extents
  2546. * in the range corresponding to the page, both state records and extent
  2547. * map records are removed
  2548. */
  2549. int try_release_extent_mapping(struct extent_map_tree *map,
  2550. struct extent_io_tree *tree, struct page *page,
  2551. gfp_t mask)
  2552. {
  2553. struct extent_map *em;
  2554. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  2555. u64 end = start + PAGE_CACHE_SIZE - 1;
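/*
 * when we're allowed to sleep and the file is reasonably large, also
 * try to drop extent_map records in the range, as long as they aren't
 * pinned and nothing in them is locked, ordered or under writeback
 */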
  2556. if ((mask & __GFP_WAIT) &&
  2557. page->mapping->host->i_size > 16 * 1024 * 1024) {
  2558. u64 len;
  2559. while (start <= end) {
  2560. len = end - start + 1;
  2561. spin_lock(&map->lock);
  2562. em = lookup_extent_mapping(map, start, len);
  2563. if (!em || IS_ERR(em)) {
  2564. spin_unlock(&map->lock);
  2565. break;
  2566. }
  2567. if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
  2568. em->start != start) {
  2569. spin_unlock(&map->lock);
  2570. free_extent_map(em);
  2571. break;
  2572. }
  2573. if (!test_range_bit(tree, em->start,
  2574. extent_map_end(em) - 1,
  2575. EXTENT_LOCKED | EXTENT_WRITEBACK |
  2576. EXTENT_ORDERED,
  2577. 0)) {
  2578. remove_extent_mapping(map, em);
  2579. /* once for the rb tree */
  2580. free_extent_map(em);
  2581. }
  2582. start = extent_map_end(em);
  2583. spin_unlock(&map->lock);
  2584. /* once for us */
  2585. free_extent_map(em);
  2586. }
  2587. }
  2588. return try_release_extent_state(map, tree, page, mask);
  2589. }
  2590. EXPORT_SYMBOL(try_release_extent_mapping);
  2591. sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
  2592. get_extent_t *get_extent)
  2593. {
  2594. struct inode *inode = mapping->host;
  2595. u64 start = iblock << inode->i_blkbits;
  2596. sector_t sector = 0;
  2597. size_t blksize = (1 << inode->i_blkbits);
  2598. struct extent_map *em;
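/*
 * look up the mapping under the extent lock; holes and other special
 * extents report sector 0
 */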
  2599. lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
  2600. GFP_NOFS);
  2601. em = get_extent(inode, NULL, 0, start, blksize, 0);
  2602. unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
  2603. GFP_NOFS);
  2604. if (!em || IS_ERR(em))
  2605. return 0;
  2606. if (em->block_start > EXTENT_MAP_LAST_BYTE)
  2607. goto out;
  2608. sector = (em->block_start + start - em->start) >> inode->i_blkbits;
  2609. out:
  2610. free_extent_map(em);
  2611. return sector;
  2612. }
  2613. static inline struct page *extent_buffer_page(struct extent_buffer *eb,
  2614. unsigned long i)
  2615. {
  2616. struct page *p;
  2617. struct address_space *mapping;
  2618. if (i == 0)
  2619. return eb->first_page;
  2620. i += eb->start >> PAGE_CACHE_SHIFT;
  2621. mapping = eb->first_page->mapping;
  2622. if (!mapping)
  2623. return NULL;
  2624. /*
  2625. * extent_buffer_page is only called after pinning the page
  2626. * by increasing the reference count. So we know the page must
  2627. * be in the radix tree.
  2628. */
  2629. rcu_read_lock();
  2630. p = radix_tree_lookup(&mapping->page_tree, i);
  2631. rcu_read_unlock();
  2632. return p;
  2633. }
  2634. static inline unsigned long num_extent_pages(u64 start, u64 len)
  2635. {
  2636. return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
  2637. (start >> PAGE_CACHE_SHIFT);
  2638. }
  2639. static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
  2640. u64 start,
  2641. unsigned long len,
  2642. gfp_t mask)
  2643. {
  2644. struct extent_buffer *eb = NULL;
  2645. #ifdef LEAK_DEBUG
  2646. unsigned long flags;
  2647. #endif
  2648. eb = kmem_cache_zalloc(extent_buffer_cache, mask);
  2649. eb->start = start;
  2650. eb->len = len;
  2651. mutex_init(&eb->mutex);
  2652. #ifdef LEAK_DEBUG
  2653. spin_lock_irqsave(&leak_lock, flags);
  2654. list_add(&eb->leak_list, &buffers);
  2655. spin_unlock_irqrestore(&leak_lock, flags);
  2656. #endif
  2657. atomic_set(&eb->refs, 1);
  2658. return eb;
  2659. }
  2660. static void __free_extent_buffer(struct extent_buffer *eb)
  2661. {
  2662. #ifdef LEAK_DEBUG
  2663. unsigned long flags;
  2664. spin_lock_irqsave(&leak_lock, flags);
  2665. list_del(&eb->leak_list);
  2666. spin_unlock_irqrestore(&leak_lock, flags);
  2667. #endif
  2668. kmem_cache_free(extent_buffer_cache, eb);
  2669. }
  2670. struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
  2671. u64 start, unsigned long len,
  2672. struct page *page0,
  2673. gfp_t mask)
  2674. {
  2675. unsigned long num_pages = num_extent_pages(start, len);
  2676. unsigned long i;
  2677. unsigned long index = start >> PAGE_CACHE_SHIFT;
  2678. struct extent_buffer *eb;
  2679. struct extent_buffer *exists = NULL;
  2680. struct page *p;
  2681. struct address_space *mapping = tree->mapping;
  2682. int uptodate = 1;
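/* if the buffer is already in the tree, just grab a reference and return it */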
  2683. spin_lock(&tree->buffer_lock);
  2684. eb = buffer_search(tree, start);
  2685. if (eb) {
  2686. atomic_inc(&eb->refs);
  2687. spin_unlock(&tree->buffer_lock);
  2688. mark_page_accessed(eb->first_page);
  2689. return eb;
  2690. }
  2691. spin_unlock(&tree->buffer_lock);
  2692. eb = __alloc_extent_buffer(tree, start, len, mask);
  2693. if (!eb)
  2694. return NULL;
  2695. if (page0) {
  2696. eb->first_page = page0;
  2697. i = 1;
  2698. index++;
  2699. page_cache_get(page0);
  2700. mark_page_accessed(page0);
  2701. set_page_extent_mapped(page0);
  2702. set_page_extent_head(page0, len);
  2703. uptodate = PageUptodate(page0);
  2704. } else {
  2705. i = 0;
  2706. }
  2707. for (; i < num_pages; i++, index++) {
  2708. p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
  2709. if (!p) {
  2710. WARN_ON(1);
  2711. goto free_eb;
  2712. }
  2713. set_page_extent_mapped(p);
  2714. mark_page_accessed(p);
  2715. if (i == 0) {
  2716. eb->first_page = p;
  2717. set_page_extent_head(p, len);
  2718. } else {
  2719. set_page_private(p, EXTENT_PAGE_PRIVATE);
  2720. }
  2721. if (!PageUptodate(p))
  2722. uptodate = 0;
  2723. unlock_page(p);
  2724. }
  2725. if (uptodate)
  2726. eb->flags |= EXTENT_UPTODATE;
  2727. eb->flags |= EXTENT_BUFFER_FILLED;
  2728. spin_lock(&tree->buffer_lock);
  2729. exists = buffer_tree_insert(tree, start, &eb->rb_node);
  2730. if (exists) {
  2731. /* add one reference for the caller */
  2732. atomic_inc(&exists->refs);
  2733. spin_unlock(&tree->buffer_lock);
  2734. goto free_eb;
  2735. }
  2736. spin_unlock(&tree->buffer_lock);
  2737. /* add one reference for the tree */
  2738. atomic_inc(&eb->refs);
  2739. return eb;
  2740. free_eb:
  2741. if (!atomic_dec_and_test(&eb->refs))
  2742. return exists;
  2743. for (index = 1; index < i; index++)
  2744. page_cache_release(extent_buffer_page(eb, index));
  2745. page_cache_release(extent_buffer_page(eb, 0));
  2746. __free_extent_buffer(eb);
  2747. return exists;
  2748. }
  2749. EXPORT_SYMBOL(alloc_extent_buffer);
  2750. struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
  2751. u64 start, unsigned long len,
  2752. gfp_t mask)
  2753. {
  2754. struct extent_buffer *eb;
  2755. spin_lock(&tree->buffer_lock);
  2756. eb = buffer_search(tree, start);
  2757. if (eb)
  2758. atomic_inc(&eb->refs);
  2759. spin_unlock(&tree->buffer_lock);
  2760. if (eb)
  2761. mark_page_accessed(eb->first_page);
  2762. return eb;
  2763. }
  2764. EXPORT_SYMBOL(find_extent_buffer);
  2765. void free_extent_buffer(struct extent_buffer *eb)
  2766. {
  2767. if (!eb)
  2768. return;
  2769. if (!atomic_dec_and_test(&eb->refs))
  2770. return;
  2771. WARN_ON(1);
  2772. }
  2773. EXPORT_SYMBOL(free_extent_buffer);
  2774. int clear_extent_buffer_dirty(struct extent_io_tree *tree,
  2775. struct extent_buffer *eb)
  2776. {
  2777. int set;
  2778. unsigned long i;
  2779. unsigned long num_pages;
  2780. struct page *page;
  2781. u64 start = eb->start;
  2782. u64 end = start + eb->len - 1;
  2783. set = clear_extent_dirty(tree, start, end, GFP_NOFS);
  2784. num_pages = num_extent_pages(eb->start, eb->len);
  2785. for (i = 0; i < num_pages; i++) {
  2786. page = extent_buffer_page(eb, i);
  2787. lock_page(page);
  2788. if (i == 0)
  2789. set_page_extent_head(page, eb->len);
  2790. else
  2791. set_page_private(page, EXTENT_PAGE_PRIVATE);
  2792. /*
  2793. * if we're on the last page or the first page and the
  2794. * block isn't aligned on a page boundary, do extra checks
  2795. * to make sure we don't clean page that is partially dirty
  2796. */
  2797. if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
  2798. ((i == num_pages - 1) &&
  2799. ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
  2800. start = (u64)page->index << PAGE_CACHE_SHIFT;
  2801. end = start + PAGE_CACHE_SIZE - 1;
  2802. if (test_range_bit(tree, start, end,
  2803. EXTENT_DIRTY, 0)) {
  2804. unlock_page(page);
  2805. continue;
  2806. }
  2807. }
  2808. clear_page_dirty_for_io(page);
  2809. spin_lock_irq(&page->mapping->tree_lock);
  2810. if (!PageDirty(page)) {
  2811. radix_tree_tag_clear(&page->mapping->page_tree,
  2812. page_index(page),
  2813. PAGECACHE_TAG_DIRTY);
  2814. }
  2815. spin_unlock_irq(&page->mapping->tree_lock);
  2816. unlock_page(page);
  2817. }
  2818. return 0;
  2819. }
  2820. EXPORT_SYMBOL(clear_extent_buffer_dirty);
  2821. int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
  2822. struct extent_buffer *eb)
  2823. {
  2824. return wait_on_extent_writeback(tree, eb->start,
  2825. eb->start + eb->len - 1);
  2826. }
  2827. EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
  2828. int set_extent_buffer_dirty(struct extent_io_tree *tree,
  2829. struct extent_buffer *eb)
  2830. {
  2831. unsigned long i;
  2832. unsigned long num_pages;
  2833. num_pages = num_extent_pages(eb->start, eb->len);
  2834. for (i = 0; i < num_pages; i++) {
  2835. struct page *page = extent_buffer_page(eb, i);
/* writepage may need to do something special for the
 * first page, so we have to make sure page->private is
 * properly set.  releasepage may drop page->private
 * on us if the page isn't already dirty.
 */
  2841. lock_page(page);
  2842. if (i == 0) {
  2843. set_page_extent_head(page, eb->len);
  2844. } else if (PagePrivate(page) &&
  2845. page->private != EXTENT_PAGE_PRIVATE) {
  2846. set_page_extent_mapped(page);
  2847. }
  2848. __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
  2849. set_extent_dirty(tree, page_offset(page),
  2850. page_offset(page) + PAGE_CACHE_SIZE -1,
  2851. GFP_NOFS);
  2852. unlock_page(page);
  2853. }
  2854. return 0;
  2855. }
  2856. EXPORT_SYMBOL(set_extent_buffer_dirty);
  2857. int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
  2858. struct extent_buffer *eb)
  2859. {
  2860. unsigned long i;
  2861. struct page *page;
  2862. unsigned long num_pages;
  2863. num_pages = num_extent_pages(eb->start, eb->len);
  2864. eb->flags &= ~EXTENT_UPTODATE;
  2865. clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
  2866. GFP_NOFS);
  2867. for (i = 0; i < num_pages; i++) {
  2868. page = extent_buffer_page(eb, i);
  2869. if (page)
  2870. ClearPageUptodate(page);
  2871. }
  2872. return 0;
  2873. }
  2874. int set_extent_buffer_uptodate(struct extent_io_tree *tree,
  2875. struct extent_buffer *eb)
  2876. {
  2877. unsigned long i;
  2878. struct page *page;
  2879. unsigned long num_pages;
  2880. num_pages = num_extent_pages(eb->start, eb->len);
  2881. set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
  2882. GFP_NOFS);
  2883. for (i = 0; i < num_pages; i++) {
  2884. page = extent_buffer_page(eb, i);
  2885. if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
  2886. ((i == num_pages - 1) &&
  2887. ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
  2888. check_page_uptodate(tree, page);
  2889. continue;
  2890. }
  2891. SetPageUptodate(page);
  2892. }
  2893. return 0;
  2894. }
  2895. EXPORT_SYMBOL(set_extent_buffer_uptodate);
  2896. int extent_range_uptodate(struct extent_io_tree *tree,
  2897. u64 start, u64 end)
  2898. {
  2899. struct page *page;
  2900. int ret;
  2901. int pg_uptodate = 1;
  2902. int uptodate;
  2903. unsigned long index;
  2904. ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
  2905. if (ret)
  2906. return 1;
  2907. while(start <= end) {
  2908. index = start >> PAGE_CACHE_SHIFT;
  2909. page = find_get_page(tree->mapping, index);
  2910. uptodate = PageUptodate(page);
  2911. page_cache_release(page);
  2912. if (!uptodate) {
  2913. pg_uptodate = 0;
  2914. break;
  2915. }
  2916. start += PAGE_CACHE_SIZE;
  2917. }
  2918. return pg_uptodate;
  2919. }
  2920. int extent_buffer_uptodate(struct extent_io_tree *tree,
  2921. struct extent_buffer *eb)
  2922. {
  2923. int ret = 0;
  2924. unsigned long num_pages;
  2925. unsigned long i;
  2926. struct page *page;
  2927. int pg_uptodate = 1;
  2928. if (eb->flags & EXTENT_UPTODATE)
  2929. return 1;
  2930. ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
  2931. EXTENT_UPTODATE, 1);
  2932. if (ret)
  2933. return ret;
  2934. num_pages = num_extent_pages(eb->start, eb->len);
  2935. for (i = 0; i < num_pages; i++) {
  2936. page = extent_buffer_page(eb, i);
  2937. if (!PageUptodate(page)) {
  2938. pg_uptodate = 0;
  2939. break;
  2940. }
  2941. }
  2942. return pg_uptodate;
  2943. }
  2944. EXPORT_SYMBOL(extent_buffer_uptodate);
int read_extent_buffer_pages(struct extent_io_tree *tree,
			     struct extent_buffer *eb,
			     u64 start, int wait,
			     get_extent_t *get_extent, int mirror_num)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	int locked_pages = 0;
	int all_uptodate = 1;
	int inc_all_pages = 0;
	unsigned long num_pages;
	struct bio *bio = NULL;
	unsigned long bio_flags = 0;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;

	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			   EXTENT_UPTODATE, 1)) {
		return 0;
	}

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (!wait) {
			if (!trylock_page(page))
				goto unlock_exit;
		} else {
			lock_page(page);
		}
		locked_pages++;
		if (!PageUptodate(page)) {
			all_uptodate = 0;
		}
	}
	if (all_uptodate) {
		if (start_i == 0)
			eb->flags |= EXTENT_UPTODATE;
		if (ret) {
			printk("all up to date but ret is %d\n", ret);
		}
		goto unlock_exit;
	}

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (inc_all_pages)
			page_cache_get(page);
		if (!PageUptodate(page)) {
			if (start_i == 0)
				inc_all_pages = 1;
			ClearPageError(page);
			err = __extent_read_full_page(tree, page,
						      get_extent, &bio,
						      mirror_num, &bio_flags);
			if (err) {
				ret = err;
				printk("err %d from __extent_read_full_page\n", ret);
			}
		} else {
			unlock_page(page);
		}
	}

	if (bio)
		submit_one_bio(READ, bio, mirror_num, bio_flags);

	if (ret || !wait) {
		if (ret)
			printk("ret %d wait %d returning\n", ret, wait);
		return ret;
	}

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			printk("page not uptodate after wait_on_page_locked\n");
			ret = -EIO;
		}
	}
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;

unlock_exit:
	i = start_i;
	while (locked_pages > 0) {
		page = extent_buffer_page(eb, i);
		i++;
		unlock_page(page);
		locked_pages--;
	}
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);
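
/*
 * Copy @len bytes starting at offset @start in the extent buffer into the
 * caller-supplied buffer @dstv, mapping one backing page at a time.
 */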
void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);
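
/*
 * Map a region of the extent buffer for direct access.  The region must not
 * cross a page boundary (-EINVAL otherwise); on success *map points at the
 * requested offset, *map_start/*map_len describe the mapped range, and
 * *token must later be handed to unmap_extent_buffer().
 */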
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **token, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}
	if (start + min_len > eb->len) {
		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
		       eb->start, eb->len, start, min_len);
		WARN_ON(1);
	}

	p = extent_buffer_page(eb, i);
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);
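
/*
 * Like map_private_extent_buffer(), but when an existing cached mapping on
 * the buffer had to be dropped, cache the new mapping in the extent buffer.
 */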
int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;

	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = map_private_extent_buffer(eb, start, min_len, token, map,
					map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);
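
/*
 * Compare @len bytes at offset @start in the extent buffer against @ptrv.
 * Returns the memcmp() result of the first chunk that differs, or 0 if the
 * ranges are identical.
 */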
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);
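
/*
 * Copy @len bytes from @srcv into the extent buffer at offset @start.  The
 * backing pages are expected to be uptodate already.
 */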
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);
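
/*
 * Fill @len bytes of the extent buffer, starting at offset @start, with the
 * byte value @c.
 */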
void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);
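
/*
 * Copy @len bytes from offset @src_offset in @src into @dst at @dst_offset,
 * going through read_extent_buffer() one destination page at a time.
 */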
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);
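
/*
 * Copy @len bytes from @src_page to @dst_page in a way that is safe for
 * overlapping ranges: memmove() when both offsets are in the same page, a
 * backwards byte-by-byte copy otherwise.  Used by memmove_extent_buffer().
 */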
static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}
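
/*
 * Plain forward copy of @len bytes from @src_page to @dst_page using
 * memcpy(); the caller is responsible for avoiding unsafe overlaps.
 */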
static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}
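
/*
 * memcpy() equivalent for ranges inside a single extent buffer: copy @len
 * bytes from @src_offset to @dst_offset in @dst, front to back, one
 * page-sized chunk at a time.
 */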
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);
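
/*
 * memmove() equivalent for ranges inside a single extent buffer.  When the
 * destination starts below the source, a forward memcpy_extent_buffer() is
 * safe; otherwise the copy is done back to front via move_pages() so that
 * overlapping ranges are handled correctly.
 */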
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);
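
/*
 * Called when the page cache wants to drop @page: free the extent buffer
 * that starts at this page if nobody else holds a reference.  Returns 1 if
 * the buffer was freed (the page can be released), 0 if it is still in use.
 */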
int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	struct extent_buffer *eb;
	int ret = 1;
	unsigned long i;
	unsigned long num_pages;

	spin_lock(&tree->buffer_lock);
	eb = buffer_search(tree, start);
	if (!eb)
		goto out;

	if (atomic_read(&eb->refs) > 1) {
		ret = 0;
		goto out;
	}
	/* at this point we can safely release the extent buffer */
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++)
		page_cache_release(extent_buffer_page(eb, i));
	rb_erase(&eb->rb_node, &tree->buffer);
	__free_extent_buffer(eb);
out:
	spin_unlock(&tree->buffer_lock);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_buffer);