extent_map.c

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_map_tree *tree;
	get_extent_t *get_extent;
};

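/*
 * Overview: each extent_map_tree carries two rb-trees.  The 'map' tree
 * caches logical->physical mappings (struct extent_map), while the
 * 'state' tree tracks per-range status bits such as EXTENT_DIRTY,
 * EXTENT_LOCKED and EXTENT_WRITEBACK (struct extent_state).  Both trees
 * key their nodes on the inclusive 'end' offset of each range, which is
 * what the shared tree_insert()/tree_search() helpers below rely on.
 */
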
void __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer),
						 0, NULL);
}

void __exit extent_map_exit(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n",
		       state->start, state->end, state->state, state->in_tree,
		       atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	rwlock_init(&tree->lock);
	spin_lock_init(&tree->lru_lock);
	tree->mapping = mapping;
	INIT_LIST_HEAD(&tree->buffer_lru);
	tree->lru_size = 0;
}
EXPORT_SYMBOL(extent_map_tree_init);

void extent_map_tree_empty_lru(struct extent_map_tree *tree)
{
	struct extent_buffer *eb;

	while (!list_empty(&tree->buffer_lru)) {
		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
				lru);
		list_del(&eb->lru);
		free_extent_buffer(eb);
	}
}
EXPORT_SYMBOL(extent_map_tree_empty_lru);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;

	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;

	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;

	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

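/*
 * Search convention used throughout this file: tree_search() returns the
 * entry whose [start, end] range contains 'offset' if one exists, and
 * otherwise falls back to the first entry that ends after 'offset'.  For
 * example, with ranges [0, 4] and [10, 14] in the tree, a search for 7
 * returns [10, 14] and a search for 20 returns NULL -- which is why
 * callers below re-check state->start against the end of their range.
 */
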
/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == EXTENT_MAP_INLINE &&
		      prev->block_start == EXTENT_MAP_INLINE) ||
		     (em->block_start == EXTENT_MAP_DELALLOC &&
		      prev->block_start == EXTENT_MAP_DELALLOC) ||
		     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
		      em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

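/*
 * Usage sketch (illustrative only; 'tree' and the offsets are made up):
 * a caller that wants the mapping for bytes [4096, 8191] would do
 *
 *	struct extent_map *em;
 *
 *	em = lookup_extent_mapping(tree, 4096, 8191);
 *	if (em) {
 *		... use em->block_start, em->bdev ...
 *		free_extent_map(em);
 *	}
 *
 * The lookup takes a reference on the extent_map it returns, so every
 * hit must be balanced with free_extent_map().
 */
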
/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created first half.  'split' indicates
 * an offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].
 * After calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

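/*
 * To make the split above concrete: splitting a state covering [0, 99]
 * at offset 50 leaves two tree entries,
 *
 *	prealloc: [0, 49]	orig: [50, 99]
 *
 * both carrying the same state bits, so the caller can then modify the
 * bits on just one half of the original range.
 */
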
/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree.
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half.
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);
	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

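/*
 * Usage sketch (illustrative; the all-ones bits mask is an assumption,
 * not a call taken from this file): dropping every state struct on a
 * range regardless of its bits, as a truncate path might, uses
 * delete == 1:
 *
 *	clear_extent_bit(tree, start, end, -1, 1, 1, GFP_NOFS);
 *
 * whereas the clear_extent_*() wrappers below pass delete == 0 and only
 * strip their particular bits.
 */
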
static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function.
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half.
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);
	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

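/*
 * Design note on the pattern shared by set_extent_bit() and
 * clear_extent_bit(): a spare extent_state is allocated *before* the
 * tree lock is taken, because splits and inserts may be needed while
 * the write lock is held with interrupts off.  When a pass through the
 * tree consumes the prealloc, the function drops the lock, reallocates,
 * and re-searches from the updated 'start' (the again:/search_again:
 * labels above).
 */
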
/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start, end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end,
					EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

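/*
 * Usage sketch (illustrative): IO paths in this file bracket a byte
 * range with the extent lock while the state structs for that IO live
 * in the tree:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... set up and submit IO on [start, end] ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 *
 * extent_read_full_page() below follows exactly this shape, with the
 * unlock half done from the end_io handler.
 */
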
/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

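/*
 * Usage sketch (illustrative; 'found_start'/'found_end' are just local
 * names): scanning forward for the next dirty range looks like
 *
 *	u64 found_start, found_end;
 *
 *	if (find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				  EXTENT_DIRTY) == 0) {
 *		... process [found_start, found_end] ...
 *	}
 *
 * The function returns 0 on a hit and 1 when no extent at or after
 * 'start' carries the requested bits.
 */
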
u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 start, u64 lock_start, u64 *end,
			     u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			goto out;
		}
		if (state->start >= lock_start) {
			if (state->state & EXTENT_LOCKED) {
				DEFINE_WAIT(wait);
				atomic_inc(&state->refs);
				prepare_to_wait(&state->wq, &wait,
						TASK_UNINTERRUPTIBLE);
				write_unlock_irq(&tree->lock);
				schedule();
				write_lock_irq(&tree->lock);
				finish_wait(&state->wq, &wait);
				free_extent_state(state);
				goto search_again;
			}
			state->state |= EXTENT_LOCKED;
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

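/*
 * Note on the function above: it walks strictly contiguous
 * EXTENT_DELALLOC states starting at 'start', locking those at or past
 * 'lock_start', and stops at the first gap, the first non-delalloc
 * state, or once max_bytes has been covered.  The return value is the
 * number of states found, with *end set to the end of the last one.
 */
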
/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}

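/*
 * The 'private' field is an opaque per-state cookie.  set/get only
 * succeed when 'start' exactly matches a state's start offset, so the
 * cookie is effectively bound to one specific extent record (a caller
 * might stash a checksum or an IO tag there; this file attaches no
 * meaning to the value).
 */
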
/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > end)
			break;

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);

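/*
 * Usage sketch (illustrative): the two 'filled' modes above answer
 * different questions about the same range.  "Is every byte of the
 * page up to date?" is
 *
 *	test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
 *
 * while "is any byte of the page still locked?" is
 *
 *	test_range_bit(tree, start, end, EXTENT_LOCKED, 0);
 *
 * The check_page_*() helpers that follow use exactly these two calls.
 */
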
/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start,
							      end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		 gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

static int submit_one_bio(int rw, struct bio *bio)
{
	int ret = 0;

	bio_get(bio);
	submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      int max_pages,
			      bio_end_io_t end_io_func)
{
	int ret = 0;
	struct bio *bio;
	int nr;

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
		    bio_add_page(bio, page, size, offset) < size) {
			ret = submit_one_bio(rw, bio);
			bio = NULL;
		} else {
			return 0;
		}
	}
	nr = min(max_pages, bio_get_nr_vecs(bdev));
	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio) {
		printk("failed to allocate bio nr %d\n", nr);
	}
	bio_add_page(bio, page, size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	if (bio_ret) {
		*bio_ret = bio;
	} else {
		ret = submit_one_bio(rw, bio);
	}
	return ret;
}

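/*
 * Design note: submit_extent_page() batches pages into one bio as long
 * as the caller passes the same 'bio_ret' back in.  A new page is merged
 * when it is physically contiguous with the bio's current tail
 * (bi_sector + bi_size) and bio_add_page() accepts it; otherwise the
 * pending bio is submitted and a fresh one is started.  Passing
 * bio_ret == NULL submits each page immediately, which is what the
 * readpage path below does.
 */
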
void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
		page_cache_get(page);
	}
}

  1402. /*
  1403. * basic readpage implementation. Locked extent state structs are inserted
  1404. * into the tree that are removed when the IO is done (by the end_io
  1405. * handlers)
  1406. */
int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			iosize = PAGE_CACHE_SIZE - page_offset;
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, NULL, 1,
						 end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(extent_read_full_page);
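
/*
 * a minimal usage sketch (hypothetical caller; my_readpage, my_tree and
 * my_get_extent are illustrative names, not part of this file):
 *
 *	static int my_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_map_tree *tree = my_tree(page->mapping->host);
 *		return extent_read_full_page(tree, page, my_get_extent);
 *	}
 *
 * the caller only supplies the get_extent_t callback; locking, hole
 * zeroing and bio submission are all handled here.
 */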
/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
			      void *data)
{
	struct inode *inode = page->mapping->host;
	struct extent_page_data *epd = data;
	struct extent_map_tree *tree = epd->tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 iosize;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
		zero_user_page(page, offset,
			       PAGE_CACHE_SIZE - offset, KM_USER0);
	}

	set_page_extent_mapped(page);

	lock_extent(tree, start, page_end, GFP_NOFS);
	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
					       &delalloc_end,
					       128 * 1024 * 1024);
	if (nr_delalloc) {
		tree->ops->fill_delalloc(inode, start, delalloc_end);
		if (delalloc_end >= page_end + 1) {
			clear_extent_bit(tree, page_end + 1, delalloc_end,
					 EXTENT_LOCKED | EXTENT_DELALLOC,
					 1, 0, GFP_NOFS);
		}
		clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
				 0, 0, GFP_NOFS);
		if (test_range_bit(tree, start, page_end,
				   EXTENT_DELALLOC, 0)) {
			printk("found delalloc bits after clear extent_bit\n");
		}
	} else if (test_range_bit(tree, start, page_end,
				  EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after find_delalloc_range returns 0\n");
	}

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = epd->get_extent(inode, page, page_offset, cur, end, 1);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
							   cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret)
			SetPageError(page);
		else {
			/* max_nr, so we don't shadow the page count above */
			unsigned long max_nr = end_index + 1;
			set_range_writeback(tree, cur, cur + iosize - 1);
			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 &epd->bio, max_nr,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}
int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	int ret;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};

	ret = __extent_writepage(page, wbc, &epd);
	if (epd.bio)
		submit_one_bio(WRITE, epd.bio);
	return ret;
}
EXPORT_SYMBOL(extent_write_full_page);

int extent_writepages(struct extent_map_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc)
{
	int ret;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};

	ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
	if (epd.bio)
		submit_one_bio(WRITE, epd.bio);
	return ret;
}
EXPORT_SYMBOL(extent_writepages);
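
/*
 * both entry points above are meant to back address_space_operations.
 * A hypothetical wiring (my_tree and my_get_extent are placeholders):
 *
 *	static int my_writepage(struct page *page,
 *				struct writeback_control *wbc)
 *	{
 *		struct extent_map_tree *tree = my_tree(page->mapping->host);
 *		return extent_write_full_page(tree, page, my_get_extent,
 *					      wbc);
 *	}
 *
 * extent_writepages covers ->writepages, batching contiguous pages into
 * a single bio via epd.bio.
 */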
/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_map_tree *tree,
			  struct page *page, unsigned long offset)
{
	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;

	lock_extent(tree, start, end, GFP_NOFS);
	wait_on_extent_writeback(tree, start, end);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
			 1, 1, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(extent_invalidatepage);
/*
 * simple commit_write call, set_page_dirty is used to mark both
 * the page and the extent records as dirty
 */
int extent_commit_write(struct extent_map_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(extent_commit_write);
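
/*
 * prepare_write makes sure every block touched by the write is usable:
 * freshly allocated blocks are zeroed outside the write range, blocks
 * that are neither new nor uptodate and only partially covered are read
 * in, and that IO is waited on before returning.
 */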
int extent_prepare_write(struct extent_map_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end, 1);
		if (IS_ERR(em) || !em) {
			/* don't return success when the lookup failed */
			err = em ? PTR_ERR(em) : -EIO;
			goto err;
		}
		cur_end = min(block_end, em->end);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (!isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;
			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize - 1) &
				~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 em->bdev, NULL, 1,
						 end_bio_extent_preparewrite);
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}
EXPORT_SYMBOL(extent_prepare_write);
/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	u64 orig_start = start;
	int ret = 1;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		if (!test_range_bit(tree, em->start, em->end,
				    EXTENT_LOCKED, 0)) {
			remove_extent_mapping(tree, em);
			/* once for the rb tree */
			free_extent_map(em);
		}
		start = em->end + 1;
		/* once for us */
		free_extent_map(em);
	}
	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
		ret = 0;
	else
		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
				 1, 1, GFP_NOFS);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_mapping);
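
/*
 * bmap helper, maps a logical file block to its physical block number.
 * Holes and inline extents have no meaningful disk address, so 0 is
 * returned for them.
 */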
sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	/* cast iblock so the shift can't overflow a 32 bit sector_t */
	u64 start = (u64)iblock << inode->i_blkbits;
	u64 end = start + (1 << inode->i_blkbits) - 1;
	sector_t sector = 0;
	struct extent_map *em;

	em = get_extent(inode, NULL, 0, start, end, 0);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start == EXTENT_MAP_INLINE ||
	    em->block_start == EXTENT_MAP_HOLE)
		goto out;

	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
out:
	free_extent_map(em);
	return sector;
}
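
/*
 * the tree keeps a small LRU of recently used extent buffers so that
 * repeated lookups of the same range can skip rebuilding the buffer from
 * the page cache.  Once the list passes BUFFER_LRU_MAX, the coldest
 * entry is dropped.
 */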
static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
{
	if (list_empty(&eb->lru)) {
		extent_buffer_get(eb);
		list_add(&eb->lru, &tree->buffer_lru);
		tree->lru_size++;
		if (tree->lru_size >= BUFFER_LRU_MAX) {
			struct extent_buffer *rm;
			rm = list_entry(tree->buffer_lru.prev,
					struct extent_buffer, lru);
			tree->lru_size--;
			list_del(&rm->lru);
			free_extent_buffer(rm);
		}
	} else {
		list_move(&eb->lru, &tree->buffer_lru);
	}
	return 0;
}
static struct extent_buffer *find_lru(struct extent_map_tree *tree,
				      u64 start, unsigned long len)
{
	struct list_head *lru = &tree->buffer_lru;
	struct list_head *cur = lru->next;
	struct extent_buffer *eb;

	if (list_empty(lru))
		return NULL;

	do {
		eb = list_entry(cur, struct extent_buffer, lru);
		if (eb->start == start && eb->len == len) {
			extent_buffer_get(eb);
			return eb;
		}
		cur = cur->next;
	} while (cur != lru);
	return NULL;
}

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}
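
/*
 * e.g. with 4k pages, start == 4095 and len == 2 spans two pages:
 * ((4095 + 2 + 4095) >> 12) - (4095 >> 12) == 2 - 0 == 2
 */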
static inline struct page *extent_buffer_page(struct extent_buffer *eb,
					      unsigned long i)
{
	struct page *p;
	struct address_space *mapping;

	if (i == 0)
		return eb->first_page;

	i += eb->start >> PAGE_CACHE_SHIFT;
	mapping = eb->first_page->mapping;
	read_lock_irq(&mapping->tree_lock);
	p = radix_tree_lookup(&mapping->page_tree, i);
	read_unlock_irq(&mapping->tree_lock);
	return p;
}
static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;

	spin_lock(&tree->lru_lock);
	eb = find_lru(tree, start, len);
	spin_unlock(&tree->lru_lock);
	if (eb) {
		return eb;
	}

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;

	INIT_LIST_HEAD(&eb->lru);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
	kmem_cache_free(extent_buffer_cache, eb);
}
struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
					  u64 start, unsigned long len,
					  struct page *page0,
					  gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	if (page0) {
		eb->first_page = page0;
		i = 1;
		index++;
		page_cache_get(page0);
		mark_page_accessed(page0);
		set_page_extent_mapped(page0);
		set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
				 len << 2);
	} else {
		i = 0;
	}
	for (; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			WARN_ON(1);
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);

		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}

		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;

fail:
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 0; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(alloc_extent_buffer);
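
/*
 * like alloc_extent_buffer, but only assembles the buffer from pages
 * already present in the page cache (find_lock_page instead of
 * find_or_create_page).  Returns NULL if any page in the range is
 * missing.
 */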
struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_lock_page(mapping, index);
		if (!p) {
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);

		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}

		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;

fail:
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 0; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);
void free_extent_buffer(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page_cache_release(extent_buffer_page(eb, i));
	}
	__free_extent_buffer(eb);
}
EXPORT_SYMBOL(free_extent_buffer);
int clear_extent_buffer_dirty(struct extent_map_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean a page that is partially
		 * dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);
int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);

int set_extent_buffer_dirty(struct extent_map_tree *tree,
			    struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *page = extent_buffer_page(eb, i);
		/* writepage may need to do something special for the
		 * first page, we have to make sure page->private is
		 * properly set.  releasepage may drop page->private
		 * on us if the page isn't already dirty.
		 */
		if (i == 0) {
			lock_page(page);
			set_page_private(page,
					 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 eb->len << 2);
		}
		__set_page_dirty_nobuffers(page);
		if (i == 0)
			unlock_page(page);
	}
	return set_extent_dirty(tree, eb->start,
				eb->start + eb->len - 1, GFP_NOFS);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);
int set_extent_buffer_uptodate(struct extent_map_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);

int extent_buffer_uptodate(struct extent_map_tree *tree,
			   struct extent_buffer *eb)
{
	if (eb->flags & EXTENT_UPTODATE)
		return 1;
	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			      EXTENT_UPTODATE, 1);
}
EXPORT_SYMBOL(extent_buffer_uptodate);
int read_extent_buffer_pages(struct extent_map_tree *tree,
			     struct extent_buffer *eb,
			     u64 start,
			     int wait)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	unsigned long num_pages;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;

	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
				EXTENT_UPTODATE, 1)) {
		return 0;
	}

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (PageUptodate(page)) {
			continue;
		}
		if (!wait) {
			if (TestSetPageLocked(page)) {
				continue;
			}
		} else {
			lock_page(page);
		}
		if (!PageUptodate(page)) {
			err = page->mapping->a_ops->readpage(NULL, page);
			if (err) {
				ret = err;
			}
		} else {
			unlock_page(page);
		}
	}

	if (ret || !wait) {
		return ret;
	}

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			ret = -EIO;
		}
	}
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);
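
/*
 * rough sketch of the extent buffer read path (hypothetical caller;
 * buf and the error handling are illustrative only):
 *
 *	eb = alloc_extent_buffer(tree, start, len, NULL, GFP_NOFS);
 *	if (eb && read_extent_buffer_pages(tree, eb, 0, 1) == 0)
 *		read_extent_buffer(eb, buf, 0, len);
 *	free_extent_buffer(eb);
 */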
void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long num_pages = num_extent_pages(eb->start, eb->len);

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			printk("page %lu not up to date i %lu, total %lu, len %lu\n",
			       page->index, i, num_pages, eb->len);
			WARN_ON(1);
		}
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **token, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;

	/* the mapping has to fit entirely inside one page */
	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
	}
	if (start + min_len > eb->len) {
		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
		       eb->start, eb->len, start, min_len);
		WARN_ON(1);
	}

	p = extent_buffer_page(eb, i);
	WARN_ON(!PageUptodate(p));
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);
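
/*
 * same as map_private_extent_buffer, except that when the buffer already
 * holds a cached mapping (eb->map_token), that mapping is dropped and
 * the new one is cached in its place.
 */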
int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;

	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = map_private_extent_buffer(eb, start, min_len, token, map,
					map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);
void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);
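
/*
 * memmove-style helper for a single page pair.  Within one page a plain
 * memmove handles any overlap; across two distinct pages the kmap'd
 * ranges can't overlap, and the bytes are still copied back to front,
 * presumably to mirror the backward walk in memmove_extent_buffer.
 */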
static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);

	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE -
					    dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);
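
/*
 * overlap-safe move within one extent buffer.  A move with dst below src
 * can't clobber unread bytes and is handed to memcpy_extent_buffer;
 * otherwise the copy walks backwards from the end of both ranges, one
 * page pair at a time.
 */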
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);