extent_io.c 81 KB

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "extent_io.h"
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;
};
int __init extent_io_init(void)
{
	extent_state_cache = btrfs_cache_create("extent_state",
					sizeof(struct extent_state), 0,
					NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = btrfs_cache_create("extent_buffers",
					sizeof(struct extent_buffer), 0,
					NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n",
		       state->start, state->end, state->state, state->tree,
		       atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}

	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}
void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping, gfp_t mask)
{
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->lru_lock);
	tree->mapping = mapping;
	INIT_LIST_HEAD(&tree->buffer_lru);
	tree->lru_size = 0;
	tree->last = NULL;
}
EXPORT_SYMBOL(extent_io_tree_init);

void extent_io_tree_empty_lru(struct extent_io_tree *tree)
{
	struct extent_buffer *eb;

	while (!list_empty(&tree->buffer_lru)) {
		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
				lru);
		list_del_init(&eb->lru);
		free_extent_buffer(eb);
	}
}
EXPORT_SYMBOL(extent_io_tree_empty_lru);
struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->tree);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);
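
/*
 * insert 'node' into the rb-tree keyed by 'offset'.  If an existing entry
 * already covers the offset, that entry's rb_node is returned and nothing
 * is inserted; otherwise the node is linked in and NULL is returned.
 */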
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
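
/*
 * search the tree for the extent_state containing 'offset'.  On a hit the
 * matching rb_node is returned (and cached in tree->last).  On a miss NULL
 * is returned and, if the caller asked for them, *prev_ret/*next_ret are
 * set to the nodes on either side of the hole.
 */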
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	if (tree->last) {
		struct extent_state *state;
		state = tree->last;
		if (state->start <= offset && offset <= state->end)
			return &tree->last->rb_node;
	}
	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else {
			tree->last = rb_entry(n, struct extent_state, rb_node);
			return n;
		}
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
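
/*
 * like __etree_search, but on a miss it returns the first extent that ends
 * after 'offset' (the node just past the hole) instead of NULL, and caches
 * it in tree->last.
 */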
static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret) {
		if (prev) {
			tree->last = rb_entry(prev, struct extent_state,
					      rb_node);
		}
		return prev;
	}
	return ret;
}
/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->tree = NULL;
			if (tree->last == other)
				tree->last = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->tree = NULL;
			if (tree->last == state)
				tree->last = NULL;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}
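
/*
 * call the filesystem's set/clear bit hooks (if any) so it can track
 * per-range state changes such as delalloc accounting.
 */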
static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state,
			 unsigned long bits)
{
	if (tree->ops && tree->ops->set_bit_hook) {
		tree->ops->set_bit_hook(tree->mapping->host, state->start,
					state->end, state->state, bits);
	}
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned long bits)
{
	if (tree->ops && tree->ops->clear_bit_hook) {
		tree->ops->clear_bit_hook(tree->mapping->host, state->start,
					  state->end, state->state, bits);
	}
}
/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	if (bits & EXTENT_DIRTY)
		tree->dirty_bytes += end - start + 1;
	set_state_cb(tree, state, bits);
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	state->tree = tree;
	tree->last = state;
	merge_state(tree, state);
	return 0;
}
/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}
/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->tree) {
			if (tree->last == state)
				tree->last = NULL;
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}
/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (!prealloc)
			prealloc = alloc_extent_state(GFP_ATOMIC);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	spin_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	spin_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);
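
/*
 * wait for a single extent_state to have its bits cleared.  Drops the tree
 * lock while sleeping and re-takes it before returning.
 */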
static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&tree->lock);
	schedule();
	spin_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			spin_unlock_irq(&tree->lock);
			cond_resched();
			spin_lock_irq(&tree->lock);
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);
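
/*
 * set the requested bits on a single extent_state, updating the tree's
 * dirty byte accounting and calling the set bit hook.
 */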
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int bits)
{
	if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	set_state_cb(tree, state, bits);
	state->state |= bits;
}
/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		set_state_bits(tree, state, bits);
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, bits);
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, bits);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);
/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);
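
/*
 * lock the range [start, end] by setting EXTENT_LOCKED exclusively.  If the
 * mask allows waiting, wait for the conflicting lock to clear and retry.
 */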
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);
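
/*
 * find the first extent at or after 'start' with the given bits set.
 * Returns 0 and fills *start_ret/*end_ret on success, 1 if nothing was
 * found.
 */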
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);
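
/*
 * find a contiguous run of EXTENT_DELALLOC extents around *start and lock
 * them, updating *start and *end to cover the locked range.  The walk stops
 * once max_bytes worth of extents have been collected; the number of
 * extents locked is returned.
 */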
u64 find_lock_delalloc_range(struct extent_io_tree *tree,
			     u64 *start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(tree, cur_start);
	if (!node || IS_ERR(node)) {
		*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			struct extent_state *prev_state;
			struct rb_node *prev_node = node;
			while (1) {
				prev_node = rb_prev(prev_node);
				if (!prev_node)
					break;
				prev_state = rb_entry(prev_node,
						      struct extent_state,
						      rb_node);
				if (!(prev_state->state & EXTENT_DELALLOC))
					break;
				state = prev_state;
				node = prev_node;
			}
		}
		if (state->state & EXTENT_LOCKED) {
			DEFINE_WAIT(wait);
			atomic_inc(&state->refs);
			prepare_to_wait(&state->wq, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&tree->lock);
			schedule();
			spin_lock_irq(&tree->lock);
			finish_wait(&state->wq, &wait);
			free_extent_state(state);
			goto search_again;
		}
		set_state_cb(tree, state, EXTENT_LOCKED);
		state->state |= EXTENT_LOCKED;
		if (!found)
			*start = state->start;
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return found;
}
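
/*
 * count the number of bytes between *start and search_end that have the
 * given bits set.  Stops once max_bytes have been counted and updates
 * *start to the first matching extent.
 */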
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	int found = 0;

	if (search_end <= cur_start) {
		printk("search_end %Lu start %Lu\n", search_end, cur_start);
		WARN_ON(1);
		return 0;
	}

	spin_lock_irq(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (state->end >= cur_start && (state->state & bits)) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = state->start;
				found = 1;
			}
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock_irq(&tree->lock);
	return total_bytes;
}
/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);
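
/*
 * set/get the private u64 stored in the extent_state that starts exactly
 * at 'start'.  Returns -ENOENT if no such extent exists.
 */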
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	spin_unlock_irq(&tree->lock);
	return ret;
}
/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;
	unsigned long flags;

	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);
/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_io_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_io_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_io_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}
/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_state *state = bio->bi_private;
	struct extent_io_tree *tree = state->tree;
	struct rb_node *node;
	u64 start;
	u64 end;
	u64 cur;
	int whole_page;
	unsigned long flags;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}

		if (tree->ops && tree->ops->writepage_end_io_hook) {
			tree->ops->writepage_end_io_hook(page, start, end,
							 state);
		}

		/*
		 * bios can get merged in funny ways, and so we need to
		 * be careful with the state variable.  We know the
		 * state won't be merged with others because it has
		 * WRITEBACK set, but we can't be sure each biovec is
		 * sequential in the file.  So, if our cached state
		 * doesn't match the expected end, search the tree
		 * for the correct one.
		 */
		spin_lock_irqsave(&tree->lock, flags);
		if (!state || state->end != end) {
			state = NULL;
			node = __etree_search(tree, start, NULL, NULL);
			if (node) {
				state = rb_entry(node, struct extent_state,
						 rb_node);
				if (state->end != end ||
				    !(state->state & EXTENT_WRITEBACK))
					state = NULL;
			}
			if (!state) {
				spin_unlock_irqrestore(&tree->lock, flags);
				clear_extent_writeback(tree, start,
						       end, GFP_ATOMIC);
				goto next_io;
			}
		}
		cur = end;
		while (1) {
			struct extent_state *clear = state;
			cur = state->start;
			node = rb_prev(&state->rb_node);
			if (node) {
				state = rb_entry(node,
						 struct extent_state,
						 rb_node);
			} else {
				state = NULL;
			}
			clear_state_bit(tree, clear, EXTENT_WRITEBACK,
					1, 0);
			if (cur == start)
				break;
			if (cur < start) {
				WARN_ON(1);
				break;
			}
			if (!node)
				break;
		}
		/* before releasing the lock, make sure the next state
		 * variable has the expected bits set and corresponds
		 * to the correct offsets in the file
		 */
		if (state && (state->end + 1 != start ||
			      !(state->state & EXTENT_WRITEBACK))) {
			state = NULL;
		}
		spin_unlock_irqrestore(&tree->lock, flags);

next_io:
		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}
/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_state *state = bio->bi_private;
	struct extent_io_tree *tree = state->tree;
	struct rb_node *node;
	u64 start;
	u64 end;
	u64 cur;
	unsigned long flags;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end,
							      state);
			if (ret)
				uptodate = 0;
		}

		spin_lock_irqsave(&tree->lock, flags);
		if (!state || state->end != end) {
			state = NULL;
			node = __etree_search(tree, start, NULL, NULL);
			if (node) {
				state = rb_entry(node, struct extent_state,
						 rb_node);
				if (state->end != end ||
				    !(state->state & EXTENT_LOCKED))
					state = NULL;
			}
			if (!state) {
				spin_unlock_irqrestore(&tree->lock, flags);
				set_extent_uptodate(tree, start, end,
						    GFP_ATOMIC);
				unlock_extent(tree, start, end, GFP_ATOMIC);
				goto next_io;
			}
		}

		cur = end;
		while (1) {
			struct extent_state *clear = state;
			cur = state->start;
			node = rb_prev(&state->rb_node);
			if (node) {
				state = rb_entry(node,
						 struct extent_state,
						 rb_node);
			} else {
				state = NULL;
			}
			set_state_cb(tree, clear, EXTENT_UPTODATE);
			clear->state |= EXTENT_UPTODATE;
			clear_state_bit(tree, clear, EXTENT_LOCKED,
					1, 0);
			if (cur == start)
				break;
			if (cur < start) {
				WARN_ON(1);
				break;
			}
			if (!node)
				break;
		}
		/* before releasing the lock, make sure the next state
		 * variable has the expected bits set and corresponds
		 * to the correct offsets in the file
		 */
1407. if (state && (state->end + 1 != start ||
1408. !(state->state & EXTENT_LOCKED))) {
  1409. state = NULL;
  1410. }
  1411. spin_unlock_irqrestore(&tree->lock, flags);
  1412. next_io:
  1413. if (whole_page) {
  1414. if (uptodate) {
  1415. SetPageUptodate(page);
  1416. } else {
  1417. ClearPageUptodate(page);
  1418. SetPageError(page);
  1419. }
  1420. unlock_page(page);
  1421. } else {
  1422. if (uptodate) {
  1423. check_page_uptodate(tree, page);
  1424. } else {
  1425. ClearPageUptodate(page);
  1426. SetPageError(page);
  1427. }
  1428. check_page_locked(tree, page);
  1429. }
  1430. } while (bvec >= bio->bi_io_vec);
  1431. bio_put(bio);
  1432. #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
  1433. return 0;
  1434. #endif
  1435. }
  1436. /*
  1437. * IO done from prepare_write is pretty simple, we just unlock
  1438. * the structs in the extent tree when done, and set the uptodate bits
  1439. * as appropriate.
  1440. */
  1441. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
  1442. static void end_bio_extent_preparewrite(struct bio *bio, int err)
  1443. #else
  1444. static int end_bio_extent_preparewrite(struct bio *bio,
  1445. unsigned int bytes_done, int err)
  1446. #endif
  1447. {
  1448. const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  1449. struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
  1450. struct extent_state *state = bio->bi_private;
  1451. struct extent_io_tree *tree = state->tree;
  1452. u64 start;
  1453. u64 end;
  1454. #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
  1455. if (bio->bi_size)
  1456. return 1;
  1457. #endif
  1458. do {
  1459. struct page *page = bvec->bv_page;
  1460. start = ((u64)page->index << PAGE_CACHE_SHIFT) +
  1461. bvec->bv_offset;
  1462. end = start + bvec->bv_len - 1;
  1463. if (--bvec >= bio->bi_io_vec)
  1464. prefetchw(&bvec->bv_page->flags);
  1465. if (uptodate) {
  1466. set_extent_uptodate(tree, start, end, GFP_ATOMIC);
  1467. } else {
  1468. ClearPageUptodate(page);
  1469. SetPageError(page);
  1470. }
  1471. unlock_extent(tree, start, end, GFP_ATOMIC);
  1472. } while (bvec >= bio->bi_io_vec);
  1473. bio_put(bio);
  1474. #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
  1475. return 0;
  1476. #endif
  1477. }
  1478. static struct bio *
  1479. extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
  1480. gfp_t gfp_flags)
  1481. {
  1482. struct bio *bio;
  1483. bio = bio_alloc(gfp_flags, nr_vecs);
  1484. if (bio == NULL && (current->flags & PF_MEMALLOC)) {
  1485. while (!bio && (nr_vecs /= 2))
  1486. bio = bio_alloc(gfp_flags, nr_vecs);
  1487. }
  1488. if (bio) {
  1489. bio->bi_bdev = bdev;
  1490. bio->bi_sector = first_sector;
  1491. }
  1492. return bio;
  1493. }
  1494. static int submit_one_bio(int rw, struct bio *bio)
  1495. {
  1496. u64 maxsector;
  1497. int ret = 0;
  1498. struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
  1499. struct page *page = bvec->bv_page;
  1500. struct extent_io_tree *tree = bio->bi_private;
  1501. struct rb_node *node;
  1502. struct extent_state *state;
  1503. u64 start;
  1504. u64 end;
  1505. start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
  1506. end = start + bvec->bv_len - 1;
  1507. spin_lock_irq(&tree->lock);
  1508. node = __etree_search(tree, start, NULL, NULL);
  1509. BUG_ON(!node);
  1510. state = rb_entry(node, struct extent_state, rb_node);
  1511. while(state->end < end) {
  1512. node = rb_next(node);
  1513. state = rb_entry(node, struct extent_state, rb_node);
  1514. }
  1515. BUG_ON(state->end != end);
  1516. spin_unlock_irq(&tree->lock);
  1517. bio->bi_private = state;
  1518. bio_get(bio);
  1519. maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
  1520. if (maxsector < bio->bi_sector) {
  1521. printk("sector too large max %Lu got %llu\n", maxsector,
  1522. (unsigned long long)bio->bi_sector);
  1523. WARN_ON(1);
  1524. }
  1525. submit_bio(rw, bio);
  1526. if (bio_flagged(bio, BIO_EOPNOTSUPP))
  1527. ret = -EOPNOTSUPP;
  1528. bio_put(bio);
  1529. return ret;
  1530. }
  1531. static int submit_extent_page(int rw, struct extent_io_tree *tree,
  1532. struct page *page, sector_t sector,
  1533. size_t size, unsigned long offset,
  1534. struct block_device *bdev,
  1535. struct bio **bio_ret,
  1536. unsigned long max_pages,
  1537. bio_end_io_t end_io_func)
  1538. {
  1539. int ret = 0;
  1540. struct bio *bio;
  1541. int nr;
  1542. if (bio_ret && *bio_ret) {
  1543. bio = *bio_ret;
  1544. if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
  1545. bio_add_page(bio, page, size, offset) < size) {
  1546. ret = submit_one_bio(rw, bio);
  1547. bio = NULL;
  1548. } else {
  1549. return 0;
  1550. }
  1551. }
  1552. nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
  1553. bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1554. if (!bio) {
1555. printk("failed to allocate bio nr %d\n", nr);
1556. return -ENOMEM; }	/* avoid NULL deref in bio_add_page below */
  1557. bio_add_page(bio, page, size, offset);
  1558. bio->bi_end_io = end_io_func;
  1559. bio->bi_private = tree;
  1560. if (bio_ret) {
  1561. *bio_ret = bio;
  1562. } else {
  1563. ret = submit_one_bio(rw, bio);
  1564. }
  1565. return ret;
  1566. }
  1567. void set_page_extent_mapped(struct page *page)
  1568. {
  1569. if (!PagePrivate(page)) {
  1570. SetPagePrivate(page);
  1571. WARN_ON(!page->mapping->a_ops->invalidatepage);
  1572. set_page_private(page, EXTENT_PAGE_PRIVATE);
  1573. page_cache_get(page);
  1574. }
  1575. }
  1576. void set_page_extent_head(struct page *page, unsigned long len)
  1577. {
  1578. set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
  1579. }
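/*
 * Note (inferred from the helper above): the head page's ->private word
 * packs the buffer length shifted two bits above the EXTENT_PAGE_PRIVATE_*
 * flags, e.g. a 16K extent buffer head page stores
 * EXTENT_PAGE_PRIVATE_FIRST_PAGE | (16384 << 2).
 */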
  1580. /*
1581. * basic readpage implementation. Locked extent state structs are inserted
1582. * into the tree and removed again when the IO is done (by the end_io
1583. * handlers)
  1584. */
  1585. static int __extent_read_full_page(struct extent_io_tree *tree,
  1586. struct page *page,
  1587. get_extent_t *get_extent,
  1588. struct bio **bio)
  1589. {
  1590. struct inode *inode = page->mapping->host;
  1591. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  1592. u64 page_end = start + PAGE_CACHE_SIZE - 1;
  1593. u64 end;
  1594. u64 cur = start;
  1595. u64 extent_offset;
  1596. u64 last_byte = i_size_read(inode);
  1597. u64 block_start;
  1598. u64 cur_end;
  1599. sector_t sector;
  1600. struct extent_map *em;
  1601. struct block_device *bdev;
  1602. int ret;
  1603. int nr = 0;
  1604. size_t page_offset = 0;
  1605. size_t iosize;
  1606. size_t blocksize = inode->i_sb->s_blocksize;
  1607. set_page_extent_mapped(page);
  1608. end = page_end;
  1609. lock_extent(tree, start, end, GFP_NOFS);
  1610. while (cur <= end) {
  1611. if (cur >= last_byte) {
  1612. char *userpage;
  1613. iosize = PAGE_CACHE_SIZE - page_offset;
  1614. userpage = kmap_atomic(page, KM_USER0);
  1615. memset(userpage + page_offset, 0, iosize);
  1616. flush_dcache_page(page);
  1617. kunmap_atomic(userpage, KM_USER0);
  1618. set_extent_uptodate(tree, cur, cur + iosize - 1,
  1619. GFP_NOFS);
  1620. unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
  1621. break;
  1622. }
  1623. em = get_extent(inode, page, page_offset, cur,
  1624. end - cur + 1, 0);
  1625. if (IS_ERR(em) || !em) {
  1626. SetPageError(page);
  1627. unlock_extent(tree, cur, end, GFP_NOFS);
  1628. break;
  1629. }
  1630. extent_offset = cur - em->start;
  1631. BUG_ON(extent_map_end(em) <= cur);
  1632. BUG_ON(end < cur);
  1633. iosize = min(extent_map_end(em) - cur, end - cur + 1);
  1634. cur_end = min(extent_map_end(em) - 1, end);
  1635. iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
  1636. sector = (em->block_start + extent_offset) >> 9;
  1637. bdev = em->bdev;
  1638. block_start = em->block_start;
  1639. free_extent_map(em);
  1640. em = NULL;
  1641. /* we've found a hole, just zero and go on */
  1642. if (block_start == EXTENT_MAP_HOLE) {
  1643. char *userpage;
  1644. userpage = kmap_atomic(page, KM_USER0);
  1645. memset(userpage + page_offset, 0, iosize);
  1646. flush_dcache_page(page);
  1647. kunmap_atomic(userpage, KM_USER0);
  1648. set_extent_uptodate(tree, cur, cur + iosize - 1,
  1649. GFP_NOFS);
  1650. unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
  1651. cur = cur + iosize;
  1652. page_offset += iosize;
  1653. continue;
  1654. }
  1655. /* the get_extent function already copied into the page */
  1656. if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
  1657. unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
  1658. cur = cur + iosize;
  1659. page_offset += iosize;
  1660. continue;
  1661. }
  1662. /* we have an inline extent but it didn't get marked up
  1663. * to date. Error out
  1664. */
  1665. if (block_start == EXTENT_MAP_INLINE) {
  1666. SetPageError(page);
  1667. unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
  1668. cur = cur + iosize;
  1669. page_offset += iosize;
  1670. continue;
  1671. }
  1672. ret = 0;
  1673. if (tree->ops && tree->ops->readpage_io_hook) {
  1674. ret = tree->ops->readpage_io_hook(page, cur,
  1675. cur + iosize - 1);
  1676. }
  1677. if (!ret) {
1678. unsigned long max_nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1679. max_nr -= page->index;
1680. ret = submit_extent_page(READ, tree, page,
1681. sector, iosize, page_offset,
1682. bdev, bio, max_nr,
  1683. end_bio_extent_readpage);
  1684. }
  1685. if (ret)
  1686. SetPageError(page);
  1687. cur = cur + iosize;
  1688. page_offset += iosize;
  1689. nr++;
  1690. }
  1691. if (!nr) {
  1692. if (!PageError(page))
  1693. SetPageUptodate(page);
  1694. unlock_page(page);
  1695. }
  1696. return 0;
  1697. }
  1698. int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
  1699. get_extent_t *get_extent)
  1700. {
  1701. struct bio *bio = NULL;
  1702. int ret;
  1703. ret = __extent_read_full_page(tree, page, get_extent, &bio);
  1704. if (bio)
  1705. submit_one_bio(READ, bio);
  1706. return ret;
  1707. }
  1708. EXPORT_SYMBOL(extent_read_full_page);
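/*
 * Illustrative sketch, not part of the original file: a filesystem would
 * typically call extent_read_full_page() from its ->readpage address_space
 * op.  my_readpage, MY_INODE_TREE() and my_get_extent are assumed names.
 *
 *	static int my_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_io_tree *tree = MY_INODE_TREE(page->mapping->host);
 *
 *		return extent_read_full_page(tree, page, my_get_extent);
 *	}
 */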
  1709. /*
  1710. * the writepage semantics are similar to regular writepage. extent
  1711. * records are inserted to lock ranges in the tree, and as dirty areas
  1712. * are found, they are marked writeback. Then the lock bits are removed
  1713. * and the end_io handler clears the writeback ranges
  1714. */
  1715. static int __extent_writepage(struct page *page, struct writeback_control *wbc,
  1716. void *data)
  1717. {
  1718. struct inode *inode = page->mapping->host;
  1719. struct extent_page_data *epd = data;
  1720. struct extent_io_tree *tree = epd->tree;
  1721. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  1722. u64 delalloc_start;
  1723. u64 page_end = start + PAGE_CACHE_SIZE - 1;
  1724. u64 end;
  1725. u64 cur = start;
  1726. u64 extent_offset;
  1727. u64 last_byte = i_size_read(inode);
  1728. u64 block_start;
  1729. u64 iosize;
  1730. sector_t sector;
  1731. struct extent_map *em;
  1732. struct block_device *bdev;
  1733. int ret;
  1734. int nr = 0;
  1735. size_t page_offset = 0;
  1736. size_t blocksize;
  1737. loff_t i_size = i_size_read(inode);
  1738. unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
  1739. u64 nr_delalloc;
  1740. u64 delalloc_end;
  1741. WARN_ON(!PageLocked(page));
  1742. if (page->index > end_index) {
  1743. clear_extent_dirty(tree, start, page_end, GFP_NOFS);
  1744. unlock_page(page);
  1745. return 0;
  1746. }
  1747. if (page->index == end_index) {
  1748. char *userpage;
  1749. size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
  1750. userpage = kmap_atomic(page, KM_USER0);
  1751. memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
  1752. flush_dcache_page(page);
  1753. kunmap_atomic(userpage, KM_USER0);
  1754. }
  1755. set_page_extent_mapped(page);
  1756. delalloc_start = start;
  1757. delalloc_end = 0;
  1758. while(delalloc_end < page_end) {
  1759. nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
  1760. &delalloc_end,
  1761. 128 * 1024 * 1024);
  1762. if (nr_delalloc == 0) {
  1763. delalloc_start = delalloc_end + 1;
  1764. continue;
  1765. }
  1766. tree->ops->fill_delalloc(inode, delalloc_start,
  1767. delalloc_end);
  1768. clear_extent_bit(tree, delalloc_start,
  1769. delalloc_end,
  1770. EXTENT_LOCKED | EXTENT_DELALLOC,
  1771. 1, 0, GFP_NOFS);
  1772. delalloc_start = delalloc_end + 1;
  1773. }
  1774. lock_extent(tree, start, page_end, GFP_NOFS);
  1775. end = page_end;
  1776. if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
  1777. printk("found delalloc bits after lock_extent\n");
  1778. }
  1779. if (last_byte <= start) {
  1780. clear_extent_dirty(tree, start, page_end, GFP_NOFS);
  1781. goto done;
  1782. }
  1783. set_extent_uptodate(tree, start, page_end, GFP_NOFS);
  1784. blocksize = inode->i_sb->s_blocksize;
  1785. while (cur <= end) {
  1786. if (cur >= last_byte) {
  1787. clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
  1788. break;
  1789. }
  1790. em = epd->get_extent(inode, page, page_offset, cur,
  1791. end - cur + 1, 1);
  1792. if (IS_ERR(em) || !em) {
  1793. SetPageError(page);
  1794. break;
  1795. }
  1796. extent_offset = cur - em->start;
  1797. BUG_ON(extent_map_end(em) <= cur);
  1798. BUG_ON(end < cur);
  1799. iosize = min(extent_map_end(em) - cur, end - cur + 1);
  1800. iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
  1801. sector = (em->block_start + extent_offset) >> 9;
  1802. bdev = em->bdev;
  1803. block_start = em->block_start;
  1804. free_extent_map(em);
  1805. em = NULL;
  1806. if (block_start == EXTENT_MAP_HOLE ||
  1807. block_start == EXTENT_MAP_INLINE) {
  1808. clear_extent_dirty(tree, cur,
  1809. cur + iosize - 1, GFP_NOFS);
  1810. cur = cur + iosize;
  1811. page_offset += iosize;
  1812. continue;
  1813. }
  1814. /* leave this out until we have a page_mkwrite call */
  1815. if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
  1816. EXTENT_DIRTY, 0)) {
  1817. cur = cur + iosize;
  1818. page_offset += iosize;
  1819. continue;
  1820. }
  1821. clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
  1822. if (tree->ops && tree->ops->writepage_io_hook) {
  1823. ret = tree->ops->writepage_io_hook(page, cur,
  1824. cur + iosize - 1);
  1825. } else {
  1826. ret = 0;
  1827. }
  1828. if (ret)
  1829. SetPageError(page);
  1830. else {
  1831. unsigned long max_nr = end_index + 1;
  1832. set_range_writeback(tree, cur, cur + iosize - 1);
  1833. if (!PageWriteback(page)) {
  1834. printk("warning page %lu not writeback, "
  1835. "cur %llu end %llu\n", page->index,
  1836. (unsigned long long)cur,
  1837. (unsigned long long)end);
  1838. }
  1839. ret = submit_extent_page(WRITE, tree, page, sector,
  1840. iosize, page_offset, bdev,
  1841. &epd->bio, max_nr,
  1842. end_bio_extent_writepage);
  1843. if (ret)
  1844. SetPageError(page);
  1845. }
  1846. cur = cur + iosize;
  1847. page_offset += iosize;
  1848. nr++;
  1849. }
  1850. done:
  1851. if (nr == 0) {
  1852. /* make sure the mapping tag for page dirty gets cleared */
  1853. set_page_writeback(page);
  1854. end_page_writeback(page);
  1855. }
  1856. unlock_extent(tree, start, page_end, GFP_NOFS);
  1857. unlock_page(page);
  1858. return 0;
  1859. }
  1860. #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
  1861. /* Taken directly from 2.6.23 for 2.6.18 back port */
  1862. typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
  1863. void *data);
  1864. /**
  1865. * write_cache_pages - walk the list of dirty pages of the given address space
  1866. * and write all of them.
  1867. * @mapping: address space structure to write
  1868. * @wbc: subtract the number of written pages from *@wbc->nr_to_write
  1869. * @writepage: function called for each page
  1870. * @data: data passed to writepage function
  1871. *
  1872. * If a page is already under I/O, write_cache_pages() skips it, even
  1873. * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
  1874. * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
  1875. * and msync() need to guarantee that all the data which was dirty at the time
  1876. * the call was made get new I/O started against them. If wbc->sync_mode is
  1877. * WB_SYNC_ALL then we were called for data integrity and we must wait for
  1878. * existing IO to complete.
  1879. */
  1880. static int write_cache_pages(struct address_space *mapping,
  1881. struct writeback_control *wbc, writepage_t writepage,
  1882. void *data)
  1883. {
  1884. struct backing_dev_info *bdi = mapping->backing_dev_info;
  1885. int ret = 0;
  1886. int done = 0;
  1887. struct pagevec pvec;
  1888. int nr_pages;
  1889. pgoff_t index;
  1890. pgoff_t end; /* Inclusive */
  1891. int scanned = 0;
  1892. int range_whole = 0;
  1893. if (wbc->nonblocking && bdi_write_congested(bdi)) {
  1894. wbc->encountered_congestion = 1;
  1895. return 0;
  1896. }
  1897. pagevec_init(&pvec, 0);
  1898. if (wbc->range_cyclic) {
  1899. index = mapping->writeback_index; /* Start from prev offset */
  1900. end = -1;
  1901. } else {
  1902. index = wbc->range_start >> PAGE_CACHE_SHIFT;
  1903. end = wbc->range_end >> PAGE_CACHE_SHIFT;
  1904. if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
  1905. range_whole = 1;
  1906. scanned = 1;
  1907. }
  1908. retry:
  1909. while (!done && (index <= end) &&
  1910. (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
  1911. PAGECACHE_TAG_DIRTY,
  1912. min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
  1913. unsigned i;
  1914. scanned = 1;
  1915. for (i = 0; i < nr_pages; i++) {
  1916. struct page *page = pvec.pages[i];
  1917. /*
  1918. * At this point we hold neither mapping->tree_lock nor
  1919. * lock on the page itself: the page may be truncated or
  1920. * invalidated (changing page->mapping to NULL), or even
  1921. * swizzled back from swapper_space to tmpfs file
  1922. * mapping
  1923. */
  1924. lock_page(page);
  1925. if (unlikely(page->mapping != mapping)) {
  1926. unlock_page(page);
  1927. continue;
  1928. }
  1929. if (!wbc->range_cyclic && page->index > end) {
  1930. done = 1;
  1931. unlock_page(page);
  1932. continue;
  1933. }
  1934. if (wbc->sync_mode != WB_SYNC_NONE)
  1935. wait_on_page_writeback(page);
  1936. if (PageWriteback(page) ||
  1937. !clear_page_dirty_for_io(page)) {
  1938. unlock_page(page);
  1939. continue;
  1940. }
  1941. ret = (*writepage)(page, wbc, data);
  1942. if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
  1943. unlock_page(page);
  1944. ret = 0;
  1945. }
  1946. if (ret || (--(wbc->nr_to_write) <= 0))
  1947. done = 1;
  1948. if (wbc->nonblocking && bdi_write_congested(bdi)) {
  1949. wbc->encountered_congestion = 1;
  1950. done = 1;
  1951. }
  1952. }
  1953. pagevec_release(&pvec);
  1954. cond_resched();
  1955. }
  1956. if (!scanned && !done) {
  1957. /*
  1958. * We hit the last page and there is more work to be done: wrap
  1959. * back to the start of the file
  1960. */
  1961. scanned = 1;
  1962. index = 0;
  1963. goto retry;
  1964. }
  1965. if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
  1966. mapping->writeback_index = index;
  1967. return ret;
  1968. }
  1969. #endif
  1970. int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
  1971. get_extent_t *get_extent,
  1972. struct writeback_control *wbc)
  1973. {
  1974. int ret;
  1975. struct address_space *mapping = page->mapping;
  1976. struct extent_page_data epd = {
  1977. .bio = NULL,
  1978. .tree = tree,
  1979. .get_extent = get_extent,
  1980. };
  1981. struct writeback_control wbc_writepages = {
  1982. .bdi = wbc->bdi,
  1983. .sync_mode = WB_SYNC_NONE,
  1984. .older_than_this = NULL,
  1985. .nr_to_write = 64,
  1986. .range_start = page_offset(page) + PAGE_CACHE_SIZE,
  1987. .range_end = (loff_t)-1,
  1988. };
  1989. ret = __extent_writepage(page, wbc, &epd);
  1990. write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
  1991. if (epd.bio) {
  1992. submit_one_bio(WRITE, epd.bio);
  1993. }
  1994. return ret;
  1995. }
  1996. EXPORT_SYMBOL(extent_write_full_page);
  1997. int extent_writepages(struct extent_io_tree *tree,
  1998. struct address_space *mapping,
  1999. get_extent_t *get_extent,
  2000. struct writeback_control *wbc)
  2001. {
  2002. int ret = 0;
  2003. struct extent_page_data epd = {
  2004. .bio = NULL,
  2005. .tree = tree,
  2006. .get_extent = get_extent,
  2007. };
  2008. ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
  2009. if (epd.bio) {
  2010. submit_one_bio(WRITE, epd.bio);
  2011. }
  2012. return ret;
  2013. }
  2014. EXPORT_SYMBOL(extent_writepages);
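/*
 * Illustrative sketch, not part of the original file: extent_writepages()
 * is shaped to sit directly behind a ->writepages address_space op.
 * my_writepages, MY_INODE_TREE() and my_get_extent are assumed names.
 *
 *	static int my_writepages(struct address_space *mapping,
 *				 struct writeback_control *wbc)
 *	{
 *		struct extent_io_tree *tree = MY_INODE_TREE(mapping->host);
 *
 *		return extent_writepages(tree, mapping, my_get_extent, wbc);
 *	}
 */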
  2015. int extent_readpages(struct extent_io_tree *tree,
  2016. struct address_space *mapping,
  2017. struct list_head *pages, unsigned nr_pages,
  2018. get_extent_t get_extent)
  2019. {
  2020. struct bio *bio = NULL;
  2021. unsigned page_idx;
  2022. struct pagevec pvec;
  2023. pagevec_init(&pvec, 0);
  2024. for (page_idx = 0; page_idx < nr_pages; page_idx++) {
  2025. struct page *page = list_entry(pages->prev, struct page, lru);
  2026. prefetchw(&page->flags);
  2027. list_del(&page->lru);
  2028. /*
  2029. * what we want to do here is call add_to_page_cache_lru,
  2030. * but that isn't exported, so we reproduce it here
  2031. */
  2032. if (!add_to_page_cache(page, mapping,
  2033. page->index, GFP_KERNEL)) {
  2034. /* open coding of lru_cache_add, also not exported */
  2035. page_cache_get(page);
  2036. if (!pagevec_add(&pvec, page))
  2037. __pagevec_lru_add(&pvec);
  2038. __extent_read_full_page(tree, page, get_extent, &bio);
  2039. }
  2040. page_cache_release(page);
  2041. }
  2042. if (pagevec_count(&pvec))
  2043. __pagevec_lru_add(&pvec);
  2044. BUG_ON(!list_empty(pages));
  2045. if (bio)
  2046. submit_one_bio(READ, bio);
  2047. return 0;
  2048. }
  2049. EXPORT_SYMBOL(extent_readpages);
  2050. /*
  2051. * basic invalidatepage code, this waits on any locked or writeback
  2052. * ranges corresponding to the page, and then deletes any extent state
  2053. * records from the tree
  2054. */
  2055. int extent_invalidatepage(struct extent_io_tree *tree,
  2056. struct page *page, unsigned long offset)
  2057. {
  2058. u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
  2059. u64 end = start + PAGE_CACHE_SIZE - 1;
  2060. size_t blocksize = page->mapping->host->i_sb->s_blocksize;
  2061. start += (offset + blocksize -1) & ~(blocksize - 1);
  2062. if (start > end)
  2063. return 0;
  2064. lock_extent(tree, start, end, GFP_NOFS);
  2065. wait_on_extent_writeback(tree, start, end);
  2066. clear_extent_bit(tree, start, end,
  2067. EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
  2068. 1, 1, GFP_NOFS);
  2069. return 0;
  2070. }
  2071. EXPORT_SYMBOL(extent_invalidatepage);
  2072. /*
2073. * simple commit_write call, set_page_dirty is used to mark the page
2074. * dirty, and the inode size is updated if the write extends the file
  2075. */
  2076. int extent_commit_write(struct extent_io_tree *tree,
  2077. struct inode *inode, struct page *page,
  2078. unsigned from, unsigned to)
  2079. {
  2080. loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
  2081. set_page_extent_mapped(page);
  2082. set_page_dirty(page);
  2083. if (pos > inode->i_size) {
  2084. i_size_write(inode, pos);
  2085. mark_inode_dirty(inode);
  2086. }
  2087. return 0;
  2088. }
  2089. EXPORT_SYMBOL(extent_commit_write);
  2090. int extent_prepare_write(struct extent_io_tree *tree,
  2091. struct inode *inode, struct page *page,
  2092. unsigned from, unsigned to, get_extent_t *get_extent)
  2093. {
  2094. u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
  2095. u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
  2096. u64 block_start;
  2097. u64 orig_block_start;
  2098. u64 block_end;
  2099. u64 cur_end;
  2100. struct extent_map *em;
  2101. unsigned blocksize = 1 << inode->i_blkbits;
  2102. size_t page_offset = 0;
  2103. size_t block_off_start;
  2104. size_t block_off_end;
  2105. int err = 0;
  2106. int iocount = 0;
  2107. int ret = 0;
  2108. int isnew;
  2109. set_page_extent_mapped(page);
  2110. block_start = (page_start + from) & ~((u64)blocksize - 1);
  2111. block_end = (page_start + to - 1) | (blocksize - 1);
  2112. orig_block_start = block_start;
  2113. lock_extent(tree, page_start, page_end, GFP_NOFS);
  2114. while(block_start <= block_end) {
  2115. em = get_extent(inode, page, page_offset, block_start,
  2116. block_end - block_start + 1, 1);
  2117. if (IS_ERR(em) || !em) {
  2118. goto err;
  2119. }
  2120. cur_end = min(block_end, extent_map_end(em) - 1);
  2121. block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
  2122. block_off_end = block_off_start + blocksize;
  2123. isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
  2124. if (!PageUptodate(page) && isnew &&
  2125. (block_off_end > to || block_off_start < from)) {
  2126. void *kaddr;
  2127. kaddr = kmap_atomic(page, KM_USER0);
  2128. if (block_off_end > to)
  2129. memset(kaddr + to, 0, block_off_end - to);
  2130. if (block_off_start < from)
  2131. memset(kaddr + block_off_start, 0,
  2132. from - block_off_start);
  2133. flush_dcache_page(page);
  2134. kunmap_atomic(kaddr, KM_USER0);
  2135. }
  2136. if ((em->block_start != EXTENT_MAP_HOLE &&
  2137. em->block_start != EXTENT_MAP_INLINE) &&
  2138. !isnew && !PageUptodate(page) &&
  2139. (block_off_end > to || block_off_start < from) &&
  2140. !test_range_bit(tree, block_start, cur_end,
  2141. EXTENT_UPTODATE, 1)) {
  2142. u64 sector;
  2143. u64 extent_offset = block_start - em->start;
  2144. size_t iosize;
  2145. sector = (em->block_start + extent_offset) >> 9;
  2146. iosize = (cur_end - block_start + blocksize) &
  2147. ~((u64)blocksize - 1);
  2148. /*
  2149. * we've already got the extent locked, but we
  2150. * need to split the state such that our end_bio
  2151. * handler can clear the lock.
  2152. */
  2153. set_extent_bit(tree, block_start,
  2154. block_start + iosize - 1,
  2155. EXTENT_LOCKED, 0, NULL, GFP_NOFS);
  2156. ret = submit_extent_page(READ, tree, page,
  2157. sector, iosize, page_offset, em->bdev,
  2158. NULL, 1,
  2159. end_bio_extent_preparewrite);
  2160. iocount++;
  2161. block_start = block_start + iosize;
  2162. } else {
  2163. set_extent_uptodate(tree, block_start, cur_end,
  2164. GFP_NOFS);
  2165. unlock_extent(tree, block_start, cur_end, GFP_NOFS);
  2166. block_start = cur_end + 1;
  2167. }
  2168. page_offset = block_start & (PAGE_CACHE_SIZE - 1);
  2169. free_extent_map(em);
  2170. }
  2171. if (iocount) {
  2172. wait_extent_bit(tree, orig_block_start,
  2173. block_end, EXTENT_LOCKED);
  2174. }
  2175. check_page_uptodate(tree, page);
  2176. err:
  2177. /* FIXME, zero out newly allocated blocks on error */
  2178. return err;
  2179. }
  2180. EXPORT_SYMBOL(extent_prepare_write);
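/*
 * Illustrative sketch, not part of the original file: extent_prepare_write()
 * and extent_commit_write() pair up behind the ->prepare_write and
 * ->commit_write address_space ops of this era.  my_*, MY_INODE_TREE() and
 * my_get_extent are assumed names.
 *
 *	static int my_prepare_write(struct file *file, struct page *page,
 *				    unsigned from, unsigned to)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return extent_prepare_write(MY_INODE_TREE(inode), inode, page,
 *					    from, to, my_get_extent);
 *	}
 *
 *	static int my_commit_write(struct file *file, struct page *page,
 *				   unsigned from, unsigned to)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return extent_commit_write(MY_INODE_TREE(inode), inode, page,
 *					   from, to);
 *	}
 */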
  2181. /*
  2182. * a helper for releasepage. As long as there are no locked extents
  2183. * in the range corresponding to the page, both state records and extent
  2184. * map records are removed
  2185. */
  2186. int try_release_extent_mapping(struct extent_map_tree *map,
  2187. struct extent_io_tree *tree, struct page *page,
  2188. gfp_t mask)
  2189. {
  2190. struct extent_map *em;
  2191. u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
  2192. u64 end = start + PAGE_CACHE_SIZE - 1;
  2193. u64 orig_start = start;
  2194. int ret = 1;
  2195. if ((mask & __GFP_WAIT) &&
  2196. page->mapping->host->i_size > 16 * 1024 * 1024) {
  2197. while (start <= end) {
  2198. spin_lock(&map->lock);
  2199. em = lookup_extent_mapping(map, start, end);
  2200. if (!em || IS_ERR(em)) {
  2201. spin_unlock(&map->lock);
  2202. break;
  2203. }
  2204. if (em->start != start) {
  2205. spin_unlock(&map->lock);
  2206. free_extent_map(em);
  2207. break;
  2208. }
  2209. if (!test_range_bit(tree, em->start,
  2210. extent_map_end(em) - 1,
  2211. EXTENT_LOCKED, 0)) {
  2212. remove_extent_mapping(map, em);
  2213. /* once for the rb tree */
  2214. free_extent_map(em);
  2215. }
  2216. start = extent_map_end(em);
  2217. spin_unlock(&map->lock);
  2218. /* once for us */
  2219. free_extent_map(em);
  2220. }
  2221. }
  2222. if (test_range_bit(tree, orig_start, end, EXTENT_IOBITS, 0))
  2223. ret = 0;
  2224. else {
  2225. if ((mask & GFP_NOFS) == GFP_NOFS)
  2226. mask = GFP_NOFS;
  2227. clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
  2228. 1, 1, mask);
  2229. }
  2230. return ret;
  2231. }
  2232. EXPORT_SYMBOL(try_release_extent_mapping);
  2233. sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
  2234. get_extent_t *get_extent)
  2235. {
  2236. struct inode *inode = mapping->host;
  2237. u64 start = iblock << inode->i_blkbits;
  2238. sector_t sector = 0;
  2239. struct extent_map *em;
  2240. em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0);
  2241. if (!em || IS_ERR(em))
  2242. return 0;
  2243. if (em->block_start == EXTENT_MAP_INLINE ||
  2244. em->block_start == EXTENT_MAP_HOLE)
  2245. goto out;
  2246. sector = (em->block_start + start - em->start) >> inode->i_blkbits;
  2247. out:
  2248. free_extent_map(em);
  2249. return sector;
  2250. }
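/*
 * Illustrative sketch, not part of the original file: extent_bmap() is the
 * natural helper behind a ->bmap address_space op.  my_bmap and
 * my_get_extent are assumed names.
 *
 *	static sector_t my_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return extent_bmap(mapping, block, my_get_extent);
 *	}
 */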
  2251. static int add_lru(struct extent_io_tree *tree, struct extent_buffer *eb)
  2252. {
  2253. if (list_empty(&eb->lru)) {
  2254. extent_buffer_get(eb);
  2255. list_add(&eb->lru, &tree->buffer_lru);
  2256. tree->lru_size++;
  2257. if (tree->lru_size >= BUFFER_LRU_MAX) {
  2258. struct extent_buffer *rm;
  2259. rm = list_entry(tree->buffer_lru.prev,
  2260. struct extent_buffer, lru);
  2261. tree->lru_size--;
  2262. list_del_init(&rm->lru);
  2263. free_extent_buffer(rm);
  2264. }
  2265. } else
  2266. list_move(&eb->lru, &tree->buffer_lru);
  2267. return 0;
  2268. }
  2269. static struct extent_buffer *find_lru(struct extent_io_tree *tree,
  2270. u64 start, unsigned long len)
  2271. {
  2272. struct list_head *lru = &tree->buffer_lru;
  2273. struct list_head *cur = lru->next;
  2274. struct extent_buffer *eb;
  2275. if (list_empty(lru))
  2276. return NULL;
  2277. do {
  2278. eb = list_entry(cur, struct extent_buffer, lru);
  2279. if (eb->start == start && eb->len == len) {
  2280. extent_buffer_get(eb);
  2281. return eb;
  2282. }
  2283. cur = cur->next;
  2284. } while (cur != lru);
  2285. return NULL;
  2286. }
  2287. static inline unsigned long num_extent_pages(u64 start, u64 len)
  2288. {
  2289. return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
  2290. (start >> PAGE_CACHE_SHIFT);
  2291. }
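/*
 * Worked example for num_extent_pages(): with 4K pages, start = 6144 and
 * len = 4096 end at byte 10239, spanning pages 1 and 2, so
 * ((6144 + 4096 + 4095) >> 12) - (6144 >> 12) = 3 - 1 = 2 pages.
 */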
  2292. static inline struct page *extent_buffer_page(struct extent_buffer *eb,
  2293. unsigned long i)
  2294. {
  2295. struct page *p;
  2296. struct address_space *mapping;
  2297. if (i == 0)
  2298. return eb->first_page;
  2299. i += eb->start >> PAGE_CACHE_SHIFT;
  2300. mapping = eb->first_page->mapping;
  2301. read_lock_irq(&mapping->tree_lock);
  2302. p = radix_tree_lookup(&mapping->page_tree, i);
  2303. read_unlock_irq(&mapping->tree_lock);
  2304. return p;
  2305. }
  2306. static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
  2307. u64 start,
  2308. unsigned long len,
  2309. gfp_t mask)
  2310. {
  2311. struct extent_buffer *eb = NULL;
  2312. spin_lock(&tree->lru_lock);
  2313. eb = find_lru(tree, start, len);
  2314. spin_unlock(&tree->lru_lock);
  2315. if (eb) {
  2316. return eb;
  2317. }
2318. eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2319. if (!eb) return NULL;
2320. INIT_LIST_HEAD(&eb->lru);
2321. eb->start = start; eb->len = len;
  2322. atomic_set(&eb->refs, 1);
  2323. return eb;
  2324. }
  2325. static void __free_extent_buffer(struct extent_buffer *eb)
  2326. {
  2327. kmem_cache_free(extent_buffer_cache, eb);
  2328. }
  2329. struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
  2330. u64 start, unsigned long len,
  2331. struct page *page0,
  2332. gfp_t mask)
  2333. {
  2334. unsigned long num_pages = num_extent_pages(start, len);
  2335. unsigned long i;
  2336. unsigned long index = start >> PAGE_CACHE_SHIFT;
  2337. struct extent_buffer *eb;
  2338. struct page *p;
  2339. struct address_space *mapping = tree->mapping;
  2340. int uptodate = 1;
  2341. eb = __alloc_extent_buffer(tree, start, len, mask);
  2342. if (!eb || IS_ERR(eb))
  2343. return NULL;
  2344. if (eb->flags & EXTENT_BUFFER_FILLED)
  2345. goto lru_add;
  2346. if (page0) {
  2347. eb->first_page = page0;
  2348. i = 1;
  2349. index++;
  2350. page_cache_get(page0);
  2351. mark_page_accessed(page0);
  2352. set_page_extent_mapped(page0);
  2353. WARN_ON(!PageUptodate(page0));
  2354. set_page_extent_head(page0, len);
  2355. } else {
  2356. i = 0;
  2357. }
  2358. for (; i < num_pages; i++, index++) {
  2359. p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
  2360. if (!p) {
  2361. WARN_ON(1);
  2362. goto fail;
  2363. }
  2364. set_page_extent_mapped(p);
  2365. mark_page_accessed(p);
  2366. if (i == 0) {
  2367. eb->first_page = p;
  2368. set_page_extent_head(p, len);
  2369. } else {
  2370. set_page_private(p, EXTENT_PAGE_PRIVATE);
  2371. }
  2372. if (!PageUptodate(p))
  2373. uptodate = 0;
  2374. unlock_page(p);
  2375. }
  2376. if (uptodate)
  2377. eb->flags |= EXTENT_UPTODATE;
  2378. eb->flags |= EXTENT_BUFFER_FILLED;
  2379. lru_add:
  2380. spin_lock(&tree->lru_lock);
  2381. add_lru(tree, eb);
  2382. spin_unlock(&tree->lru_lock);
  2383. return eb;
  2384. fail:
  2385. spin_lock(&tree->lru_lock);
  2386. list_del_init(&eb->lru);
  2387. spin_unlock(&tree->lru_lock);
  2388. if (!atomic_dec_and_test(&eb->refs))
  2389. return NULL;
  2390. for (index = 1; index < i; index++) {
  2391. page_cache_release(extent_buffer_page(eb, index));
  2392. }
  2393. if (i > 0)
  2394. page_cache_release(extent_buffer_page(eb, 0));
  2395. __free_extent_buffer(eb);
  2396. return NULL;
  2397. }
  2398. EXPORT_SYMBOL(alloc_extent_buffer);
  2399. struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
  2400. u64 start, unsigned long len,
  2401. gfp_t mask)
  2402. {
  2403. unsigned long num_pages = num_extent_pages(start, len);
  2404. unsigned long i;
  2405. unsigned long index = start >> PAGE_CACHE_SHIFT;
  2406. struct extent_buffer *eb;
  2407. struct page *p;
  2408. struct address_space *mapping = tree->mapping;
  2409. int uptodate = 1;
  2410. eb = __alloc_extent_buffer(tree, start, len, mask);
  2411. if (!eb || IS_ERR(eb))
  2412. return NULL;
  2413. if (eb->flags & EXTENT_BUFFER_FILLED)
  2414. goto lru_add;
  2415. for (i = 0; i < num_pages; i++, index++) {
  2416. p = find_lock_page(mapping, index);
  2417. if (!p) {
  2418. goto fail;
  2419. }
  2420. set_page_extent_mapped(p);
  2421. mark_page_accessed(p);
  2422. if (i == 0) {
  2423. eb->first_page = p;
  2424. set_page_extent_head(p, len);
  2425. } else {
  2426. set_page_private(p, EXTENT_PAGE_PRIVATE);
  2427. }
  2428. if (!PageUptodate(p))
  2429. uptodate = 0;
  2430. unlock_page(p);
  2431. }
  2432. if (uptodate)
  2433. eb->flags |= EXTENT_UPTODATE;
  2434. eb->flags |= EXTENT_BUFFER_FILLED;
  2435. lru_add:
  2436. spin_lock(&tree->lru_lock);
  2437. add_lru(tree, eb);
  2438. spin_unlock(&tree->lru_lock);
  2439. return eb;
  2440. fail:
  2441. spin_lock(&tree->lru_lock);
  2442. list_del_init(&eb->lru);
  2443. spin_unlock(&tree->lru_lock);
  2444. if (!atomic_dec_and_test(&eb->refs))
  2445. return NULL;
  2446. for (index = 1; index < i; index++) {
  2447. page_cache_release(extent_buffer_page(eb, index));
  2448. }
  2449. if (i > 0)
  2450. page_cache_release(extent_buffer_page(eb, 0));
  2451. __free_extent_buffer(eb);
  2452. return NULL;
  2453. }
  2454. EXPORT_SYMBOL(find_extent_buffer);
  2455. void free_extent_buffer(struct extent_buffer *eb)
  2456. {
  2457. unsigned long i;
  2458. unsigned long num_pages;
  2459. if (!eb)
  2460. return;
  2461. if (!atomic_dec_and_test(&eb->refs))
  2462. return;
  2463. WARN_ON(!list_empty(&eb->lru));
  2464. num_pages = num_extent_pages(eb->start, eb->len);
  2465. for (i = 1; i < num_pages; i++) {
  2466. page_cache_release(extent_buffer_page(eb, i));
  2467. }
  2468. page_cache_release(extent_buffer_page(eb, 0));
  2469. __free_extent_buffer(eb);
  2470. }
  2471. EXPORT_SYMBOL(free_extent_buffer);
  2472. int clear_extent_buffer_dirty(struct extent_io_tree *tree,
  2473. struct extent_buffer *eb)
  2474. {
  2475. int set;
  2476. unsigned long i;
  2477. unsigned long num_pages;
  2478. struct page *page;
  2479. u64 start = eb->start;
  2480. u64 end = start + eb->len - 1;
  2481. set = clear_extent_dirty(tree, start, end, GFP_NOFS);
  2482. num_pages = num_extent_pages(eb->start, eb->len);
  2483. for (i = 0; i < num_pages; i++) {
  2484. page = extent_buffer_page(eb, i);
  2485. lock_page(page);
  2486. if (i == 0)
  2487. set_page_extent_head(page, eb->len);
  2488. else
  2489. set_page_private(page, EXTENT_PAGE_PRIVATE);
  2490. /*
  2491. * if we're on the last page or the first page and the
  2492. * block isn't aligned on a page boundary, do extra checks
2493. * to make sure we don't clean a page that is partially dirty
  2494. */
  2495. if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
  2496. ((i == num_pages - 1) &&
  2497. ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
  2498. start = (u64)page->index << PAGE_CACHE_SHIFT;
  2499. end = start + PAGE_CACHE_SIZE - 1;
  2500. if (test_range_bit(tree, start, end,
  2501. EXTENT_DIRTY, 0)) {
  2502. unlock_page(page);
  2503. continue;
  2504. }
  2505. }
  2506. clear_page_dirty_for_io(page);
2507. write_lock_irq(&page->mapping->tree_lock);
  2508. if (!PageDirty(page)) {
  2509. radix_tree_tag_clear(&page->mapping->page_tree,
  2510. page_index(page),
  2511. PAGECACHE_TAG_DIRTY);
  2512. }
2513. write_unlock_irq(&page->mapping->tree_lock);
  2514. unlock_page(page);
  2515. }
  2516. return 0;
  2517. }
  2518. EXPORT_SYMBOL(clear_extent_buffer_dirty);
  2519. int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
  2520. struct extent_buffer *eb)
  2521. {
  2522. return wait_on_extent_writeback(tree, eb->start,
  2523. eb->start + eb->len - 1);
  2524. }
  2525. EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
  2526. int set_extent_buffer_dirty(struct extent_io_tree *tree,
  2527. struct extent_buffer *eb)
  2528. {
  2529. unsigned long i;
  2530. unsigned long num_pages;
  2531. num_pages = num_extent_pages(eb->start, eb->len);
  2532. for (i = 0; i < num_pages; i++) {
  2533. struct page *page = extent_buffer_page(eb, i);
  2534. /* writepage may need to do something special for the
  2535. * first page, we have to make sure page->private is
  2536. * properly set. releasepage may drop page->private
  2537. * on us if the page isn't already dirty.
  2538. */
  2539. if (i == 0) {
  2540. lock_page(page);
  2541. set_page_extent_head(page, eb->len);
  2542. } else if (PagePrivate(page) &&
  2543. page->private != EXTENT_PAGE_PRIVATE) {
  2544. lock_page(page);
  2545. set_page_extent_mapped(page);
  2546. unlock_page(page);
  2547. }
  2548. __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
  2549. if (i == 0)
  2550. unlock_page(page);
  2551. }
  2552. return set_extent_dirty(tree, eb->start,
  2553. eb->start + eb->len - 1, GFP_NOFS);
  2554. }
  2555. EXPORT_SYMBOL(set_extent_buffer_dirty);
  2556. int set_extent_buffer_uptodate(struct extent_io_tree *tree,
  2557. struct extent_buffer *eb)
  2558. {
  2559. unsigned long i;
  2560. struct page *page;
  2561. unsigned long num_pages;
  2562. num_pages = num_extent_pages(eb->start, eb->len);
  2563. set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
  2564. GFP_NOFS);
  2565. for (i = 0; i < num_pages; i++) {
  2566. page = extent_buffer_page(eb, i);
  2567. if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
  2568. ((i == num_pages - 1) &&
  2569. ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
  2570. check_page_uptodate(tree, page);
  2571. continue;
  2572. }
  2573. SetPageUptodate(page);
  2574. }
  2575. return 0;
  2576. }
  2577. EXPORT_SYMBOL(set_extent_buffer_uptodate);
  2578. int extent_buffer_uptodate(struct extent_io_tree *tree,
  2579. struct extent_buffer *eb)
  2580. {
  2581. if (eb->flags & EXTENT_UPTODATE)
  2582. return 1;
  2583. return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
  2584. EXTENT_UPTODATE, 1);
  2585. }
  2586. EXPORT_SYMBOL(extent_buffer_uptodate);
  2587. int read_extent_buffer_pages(struct extent_io_tree *tree,
  2588. struct extent_buffer *eb,
  2589. u64 start,
  2590. int wait)
  2591. {
  2592. unsigned long i;
  2593. unsigned long start_i;
  2594. struct page *page;
  2595. int err;
  2596. int ret = 0;
  2597. unsigned long num_pages;
  2598. if (eb->flags & EXTENT_UPTODATE)
  2599. return 0;
  2600. if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
  2601. EXTENT_UPTODATE, 1)) {
  2602. return 0;
  2603. }
  2604. if (start) {
  2605. WARN_ON(start < eb->start);
  2606. start_i = (start >> PAGE_CACHE_SHIFT) -
  2607. (eb->start >> PAGE_CACHE_SHIFT);
  2608. } else {
  2609. start_i = 0;
  2610. }
  2611. num_pages = num_extent_pages(eb->start, eb->len);
  2612. for (i = start_i; i < num_pages; i++) {
  2613. page = extent_buffer_page(eb, i);
  2614. if (PageUptodate(page)) {
  2615. continue;
  2616. }
  2617. if (!wait) {
  2618. if (TestSetPageLocked(page)) {
  2619. continue;
  2620. }
  2621. } else {
  2622. lock_page(page);
  2623. }
  2624. if (!PageUptodate(page)) {
  2625. err = page->mapping->a_ops->readpage(NULL, page);
  2626. if (err) {
  2627. ret = err;
  2628. }
  2629. } else {
  2630. unlock_page(page);
  2631. }
  2632. }
  2633. if (ret || !wait) {
  2634. return ret;
  2635. }
  2636. for (i = start_i; i < num_pages; i++) {
  2637. page = extent_buffer_page(eb, i);
  2638. wait_on_page_locked(page);
  2639. if (!PageUptodate(page)) {
  2640. ret = -EIO;
  2641. }
  2642. }
  2643. if (!ret)
  2644. eb->flags |= EXTENT_UPTODATE;
  2645. return ret;
  2646. }
  2647. EXPORT_SYMBOL(read_extent_buffer_pages);
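/*
 * Illustrative sketch, not part of the original file: a typical metadata
 * read using the extent_buffer helpers above.  my_tree, start and blocksize
 * are assumed to come from the caller.
 *
 *	struct extent_buffer *eb;
 *	char header[32];
 *
 *	eb = alloc_extent_buffer(my_tree, start, blocksize, NULL, GFP_NOFS);
 *	if (!eb)
 *		return -ENOMEM;
 *	if (read_extent_buffer_pages(my_tree, eb, 0, 1)) {	// 1 == wait for IO
 *		free_extent_buffer(eb);
 *		return -EIO;
 *	}
 *	read_extent_buffer(eb, header, 0, sizeof(header));
 *	free_extent_buffer(eb);
 */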
  2648. void read_extent_buffer(struct extent_buffer *eb, void *dstv,
  2649. unsigned long start,
  2650. unsigned long len)
  2651. {
  2652. size_t cur;
  2653. size_t offset;
  2654. struct page *page;
  2655. char *kaddr;
  2656. char *dst = (char *)dstv;
  2657. size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
  2658. unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
  2659. unsigned long num_pages = num_extent_pages(eb->start, eb->len);
  2660. WARN_ON(start > eb->len);
  2661. WARN_ON(start + len > eb->start + eb->len);
  2662. offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
  2663. while(len > 0) {
  2664. page = extent_buffer_page(eb, i);
  2665. if (!PageUptodate(page)) {
  2666. printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
  2667. WARN_ON(1);
  2668. }
  2669. WARN_ON(!PageUptodate(page));
  2670. cur = min(len, (PAGE_CACHE_SIZE - offset));
  2671. kaddr = kmap_atomic(page, KM_USER1);
  2672. memcpy(dst, kaddr + offset, cur);
  2673. kunmap_atomic(kaddr, KM_USER1);
  2674. dst += cur;
  2675. len -= cur;
  2676. offset = 0;
  2677. i++;
  2678. }
  2679. }
  2680. EXPORT_SYMBOL(read_extent_buffer);
  2681. int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
  2682. unsigned long min_len, char **token, char **map,
  2683. unsigned long *map_start,
  2684. unsigned long *map_len, int km)
  2685. {
  2686. size_t offset = start & (PAGE_CACHE_SIZE - 1);
  2687. char *kaddr;
  2688. struct page *p;
  2689. size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
  2690. unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
  2691. unsigned long end_i = (start_offset + start + min_len - 1) >>
  2692. PAGE_CACHE_SHIFT;
  2693. if (i != end_i)
  2694. return -EINVAL;
  2695. if (i == 0) {
  2696. offset = start_offset;
  2697. *map_start = 0;
  2698. } else {
  2699. offset = 0;
  2700. *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
  2701. }
  2702. if (start + min_len > eb->len) {
  2703. printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
  2704. WARN_ON(1);
  2705. }
  2706. p = extent_buffer_page(eb, i);
  2707. WARN_ON(!PageUptodate(p));
  2708. kaddr = kmap_atomic(p, km);
  2709. *token = kaddr;
  2710. *map = kaddr + offset;
  2711. *map_len = PAGE_CACHE_SIZE - offset;
  2712. return 0;
  2713. }
  2714. EXPORT_SYMBOL(map_private_extent_buffer);
  2715. int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
  2716. unsigned long min_len,
  2717. char **token, char **map,
  2718. unsigned long *map_start,
  2719. unsigned long *map_len, int km)
  2720. {
  2721. int err;
  2722. int save = 0;
  2723. if (eb->map_token) {
  2724. unmap_extent_buffer(eb, eb->map_token, km);
  2725. eb->map_token = NULL;
  2726. save = 1;
  2727. }
  2728. err = map_private_extent_buffer(eb, start, min_len, token, map,
  2729. map_start, map_len, km);
  2730. if (!err && save) {
  2731. eb->map_token = *token;
  2732. eb->kaddr = *map;
  2733. eb->map_start = *map_start;
  2734. eb->map_len = *map_len;
  2735. }
  2736. return err;
  2737. }
  2738. EXPORT_SYMBOL(map_extent_buffer);
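/*
 * Illustrative sketch, not part of the original file: map_extent_buffer()
 * hands back a kmapped window into one page of the buffer; callers stay
 * within *map_len and pair it with unmap_extent_buffer() on the same KM
 * slot.  eb and offset are assumed to come from the caller.
 *
 *	char *token;
 *	char *kaddr;
 *	unsigned long map_start;
 *	unsigned long map_len;
 *
 *	if (!map_extent_buffer(eb, offset, 4, &token, &kaddr,
 *			       &map_start, &map_len, KM_USER1)) {
 *		// kaddr points at "offset" inside eb, valid for map_len bytes
 *		unmap_extent_buffer(eb, token, KM_USER1);
 *	}
 */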
  2739. void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
  2740. {
  2741. kunmap_atomic(token, km);
  2742. }
  2743. EXPORT_SYMBOL(unmap_extent_buffer);
  2744. int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
  2745. unsigned long start,
  2746. unsigned long len)
  2747. {
  2748. size_t cur;
  2749. size_t offset;
  2750. struct page *page;
  2751. char *kaddr;
  2752. char *ptr = (char *)ptrv;
  2753. size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
  2754. unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
  2755. int ret = 0;
  2756. WARN_ON(start > eb->len);
  2757. WARN_ON(start + len > eb->start + eb->len);
  2758. offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
  2759. while(len > 0) {
  2760. page = extent_buffer_page(eb, i);
  2761. WARN_ON(!PageUptodate(page));
  2762. cur = min(len, (PAGE_CACHE_SIZE - offset));
  2763. kaddr = kmap_atomic(page, KM_USER0);
  2764. ret = memcmp(ptr, kaddr + offset, cur);
  2765. kunmap_atomic(kaddr, KM_USER0);
  2766. if (ret)
  2767. break;
  2768. ptr += cur;
  2769. len -= cur;
  2770. offset = 0;
  2771. i++;
  2772. }
  2773. return ret;
  2774. }
  2775. EXPORT_SYMBOL(memcmp_extent_buffer);
  2776. void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
  2777. unsigned long start, unsigned long len)
  2778. {
  2779. size_t cur;
  2780. size_t offset;
  2781. struct page *page;
  2782. char *kaddr;
  2783. char *src = (char *)srcv;
  2784. size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
  2785. unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
  2786. WARN_ON(start > eb->len);
  2787. WARN_ON(start + len > eb->start + eb->len);
  2788. offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
  2789. while(len > 0) {
  2790. page = extent_buffer_page(eb, i);
  2791. WARN_ON(!PageUptodate(page));
  2792. cur = min(len, PAGE_CACHE_SIZE - offset);
  2793. kaddr = kmap_atomic(page, KM_USER1);
  2794. memcpy(kaddr + offset, src, cur);
  2795. kunmap_atomic(kaddr, KM_USER1);
  2796. src += cur;
  2797. len -= cur;
  2798. offset = 0;
  2799. i++;
  2800. }
  2801. }
  2802. EXPORT_SYMBOL(write_extent_buffer);
  2803. void memset_extent_buffer(struct extent_buffer *eb, char c,
  2804. unsigned long start, unsigned long len)
  2805. {
  2806. size_t cur;
  2807. size_t offset;
  2808. struct page *page;
  2809. char *kaddr;
  2810. size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
  2811. unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
  2812. WARN_ON(start > eb->len);
  2813. WARN_ON(start + len > eb->start + eb->len);
  2814. offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
  2815. while(len > 0) {
  2816. page = extent_buffer_page(eb, i);
  2817. WARN_ON(!PageUptodate(page));
  2818. cur = min(len, PAGE_CACHE_SIZE - offset);
  2819. kaddr = kmap_atomic(page, KM_USER0);
  2820. memset(kaddr + offset, c, cur);
  2821. kunmap_atomic(kaddr, KM_USER0);
  2822. len -= cur;
  2823. offset = 0;
  2824. i++;
  2825. }
  2826. }
  2827. EXPORT_SYMBOL(memset_extent_buffer);
  2828. void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
  2829. unsigned long dst_offset, unsigned long src_offset,
  2830. unsigned long len)
  2831. {
  2832. u64 dst_len = dst->len;
  2833. size_t cur;
  2834. size_t offset;
  2835. struct page *page;
  2836. char *kaddr;
  2837. size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
  2838. unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
  2839. WARN_ON(src->len != dst_len);
  2840. offset = (start_offset + dst_offset) &
  2841. ((unsigned long)PAGE_CACHE_SIZE - 1);
  2842. while(len > 0) {
  2843. page = extent_buffer_page(dst, i);
  2844. WARN_ON(!PageUptodate(page));
  2845. cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
  2846. kaddr = kmap_atomic(page, KM_USER0);
  2847. read_extent_buffer(src, kaddr + offset, src_offset, cur);
  2848. kunmap_atomic(kaddr, KM_USER0);
  2849. src_offset += cur;
  2850. len -= cur;
  2851. offset = 0;
  2852. i++;
  2853. }
  2854. }
  2855. EXPORT_SYMBOL(copy_extent_buffer);
  2856. static void move_pages(struct page *dst_page, struct page *src_page,
  2857. unsigned long dst_off, unsigned long src_off,
  2858. unsigned long len)
  2859. {
  2860. char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
  2861. if (dst_page == src_page) {
  2862. memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
  2863. } else {
  2864. char *src_kaddr = kmap_atomic(src_page, KM_USER1);
  2865. char *p = dst_kaddr + dst_off + len;
  2866. char *s = src_kaddr + src_off + len;
  2867. while (len--)
  2868. *--p = *--s;
  2869. kunmap_atomic(src_kaddr, KM_USER1);
  2870. }
  2871. kunmap_atomic(dst_kaddr, KM_USER0);
  2872. }
  2873. static void copy_pages(struct page *dst_page, struct page *src_page,
  2874. unsigned long dst_off, unsigned long src_off,
  2875. unsigned long len)
  2876. {
  2877. char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
  2878. char *src_kaddr;
  2879. if (dst_page != src_page)
  2880. src_kaddr = kmap_atomic(src_page, KM_USER1);
  2881. else
  2882. src_kaddr = dst_kaddr;
  2883. memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
  2884. kunmap_atomic(dst_kaddr, KM_USER0);
  2885. if (dst_page != src_page)
  2886. kunmap_atomic(src_kaddr, KM_USER1);
  2887. }
  2888. void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
  2889. unsigned long src_offset, unsigned long len)
  2890. {
  2891. size_t cur;
  2892. size_t dst_off_in_page;
  2893. size_t src_off_in_page;
  2894. size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
  2895. unsigned long dst_i;
  2896. unsigned long src_i;
  2897. if (src_offset + len > dst->len) {
  2898. printk("memmove bogus src_offset %lu move len %lu len %lu\n",
  2899. src_offset, len, dst->len);
  2900. BUG_ON(1);
  2901. }
  2902. if (dst_offset + len > dst->len) {
  2903. printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
  2904. dst_offset, len, dst->len);
  2905. BUG_ON(1);
  2906. }
  2907. while(len > 0) {
  2908. dst_off_in_page = (start_offset + dst_offset) &
  2909. ((unsigned long)PAGE_CACHE_SIZE - 1);
  2910. src_off_in_page = (start_offset + src_offset) &
  2911. ((unsigned long)PAGE_CACHE_SIZE - 1);
  2912. dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
  2913. src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
  2914. cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
  2915. src_off_in_page));
  2916. cur = min_t(unsigned long, cur,
  2917. (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
  2918. copy_pages(extent_buffer_page(dst, dst_i),
  2919. extent_buffer_page(dst, src_i),
  2920. dst_off_in_page, src_off_in_page, cur);
  2921. src_offset += cur;
  2922. dst_offset += cur;
  2923. len -= cur;
  2924. }
  2925. }
  2926. EXPORT_SYMBOL(memcpy_extent_buffer);
  2927. void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
  2928. unsigned long src_offset, unsigned long len)
  2929. {
  2930. size_t cur;
  2931. size_t dst_off_in_page;
  2932. size_t src_off_in_page;
  2933. unsigned long dst_end = dst_offset + len - 1;
  2934. unsigned long src_end = src_offset + len - 1;
  2935. size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
  2936. unsigned long dst_i;
  2937. unsigned long src_i;
  2938. if (src_offset + len > dst->len) {
  2939. printk("memmove bogus src_offset %lu move len %lu len %lu\n",
  2940. src_offset, len, dst->len);
  2941. BUG_ON(1);
  2942. }
  2943. if (dst_offset + len > dst->len) {
  2944. printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
  2945. dst_offset, len, dst->len);
  2946. BUG_ON(1);
  2947. }
  2948. if (dst_offset < src_offset) {
  2949. memcpy_extent_buffer(dst, dst_offset, src_offset, len);
  2950. return;
  2951. }
  2952. while(len > 0) {
  2953. dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
  2954. src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
  2955. dst_off_in_page = (start_offset + dst_end) &
  2956. ((unsigned long)PAGE_CACHE_SIZE - 1);
  2957. src_off_in_page = (start_offset + src_end) &
  2958. ((unsigned long)PAGE_CACHE_SIZE - 1);
  2959. cur = min_t(unsigned long, len, src_off_in_page + 1);
  2960. cur = min(cur, dst_off_in_page + 1);
  2961. move_pages(extent_buffer_page(dst, dst_i),
  2962. extent_buffer_page(dst, src_i),
  2963. dst_off_in_page - cur + 1,
  2964. src_off_in_page - cur + 1, cur);
  2965. dst_end -= cur;
  2966. src_end -= cur;
  2967. len -= cur;
  2968. }
  2969. }
  2970. EXPORT_SYMBOL(memmove_extent_buffer);