extent_map.c

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

void __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	extent_state_cache = btrfs_cache_create("extent_state",
						sizeof(struct extent_state), 0,
						NULL);
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
						 sizeof(struct extent_buffer), 0,
						 NULL);
}

void __exit extent_map_exit(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n",
		       state->start, state->end, state->state, state->in_tree,
		       atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	rwlock_init(&tree->lock);
	spin_lock_init(&tree->lru_lock);
	tree->mapping = mapping;
	INIT_LIST_HEAD(&tree->buffer_lru);
	tree->lru_size = 0;
}
EXPORT_SYMBOL(extent_map_tree_init);

void extent_map_tree_empty_lru(struct extent_map_tree *tree)
{
	struct extent_buffer *eb;

	while (!list_empty(&tree->buffer_lru)) {
		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
				lru);
		list_del(&eb->lru);
		free_extent_buffer(eb);
	}
}
EXPORT_SYMBOL(extent_map_tree_empty_lru);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;

	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;

	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;

	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings. The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == EXTENT_MAP_INLINE &&
		      prev->block_start == EXTENT_MAP_INLINE) ||
		     (em->block_start == EXTENT_MAP_DELALLOC &&
		      prev->block_start == EXTENT_MAP_DELALLOC) ||
		     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
		      em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);
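
/*
 * Illustrative usage sketch (not part of the original file; the 4k
 * geometry and the 'block'/'bdev' values are made up). A caller fills
 * in an extent_map it owns and hands it to the tree, which takes its
 * own reference on success, so a caller that is done with 'em' simply
 * drops its reference afterwards:
 *
 *	struct extent_map *em = alloc_extent_map(GFP_NOFS);
 *	em->start = 0;
 *	em->end = 4095;				(range is inclusive)
 *	em->block_start = block;		(or EXTENT_MAP_HOLE etc)
 *	em->block_end = block + 4095;
 *	em->bdev = bdev;
 *	ret = add_extent_mapping(tree, em);	(tree takes a reference)
 *	free_extent_map(em);			(drop the caller's reference)
 */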
/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range. There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);
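
/*
 * Illustrative usage sketch (not part of the original file). The map
 * that comes back carries an extra reference and may cover only part
 * of the range asked about, so callers loop or re-check as needed:
 *
 *	em = lookup_extent_mapping(tree, pos, pos + len - 1);
 *	if (em && !IS_ERR(em)) {
 *		... use em->start, em->end, em->block_start ...
 *		free_extent_map(em);	(drop the lookup reference)
 *	}
 */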
/*
 * removes an extent_map struct from the tree. No reference counts are
 * dropped, and no checks are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);
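
/*
 * Illustrative note (not part of the original file): since nothing is
 * freed here, a caller that wants the mapping gone for good pairs the
 * removal with a free_extent_map() to drop the reference the tree took
 * in add_extent_mapping():
 *
 *	ret = remove_extent_mapping(tree, em);
 *	if (!ret)
 *		free_extent_map(em);
 */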
/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree. Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree. 'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally. This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half. 'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end]. After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;
	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;

	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree. This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again. It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);
	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);
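
/*
 * Illustrative usage sketch (not part of the original file; the open-
 * ended range is a made-up example): truncate-style callers pass
 * delete == 1 to drop the whole range no matter which bits are set, and
 * wake == 1 so anyone sleeping on EXTENT_LOCKED states gets kicked:
 *
 *	clear_extent_bit(tree, start, (u64)-1,
 *			 EXTENT_DIRTY | EXTENT_LOCKED, 1, 1, GFP_NOFS);
 */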
static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);
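
/*
 * Illustrative usage sketch (not part of the original file): because
 * wait_on_state() drops the tree lock while sleeping, the walk restarts
 * from the state it slept on. A typical caller just names the bits it
 * wants to see cleared, e.g.
 *
 *	wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
 *
 * which is exactly what wait_on_extent_writeback() below wraps.
 */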
/*
 * set some bits on a range in the tree. This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set. The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again. It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);
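
/*
 * Illustrative usage sketch (not part of the original file): exclusive
 * mode is how lock_extent() below detects contention; failed_start
 * reports where the already-set range begins so the caller can wait on
 * it and retry:
 *
 *	u64 failed_start;
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST)
 *		wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
 */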
/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way. [start,end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
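
/*
 * Illustrative usage sketch (not part of the original file): lock and
 * unlock are used as a pair around work on a byte range, much like the
 * readpage path later in this file does:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... read or modify [start, end] ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */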
/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);

u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 start, u64 lock_start, u64 *end,
			     u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			goto out;
		}
		if (state->start >= lock_start) {
			if (state->state & EXTENT_LOCKED) {
				DEFINE_WAIT(wait);
				atomic_inc(&state->refs);
				prepare_to_wait(&state->wq, &wait,
						TASK_UNINTERRUPTIBLE);
				write_unlock_irq(&tree->lock);
				schedule();
				write_lock_irq(&tree->lock);
				finish_wait(&state->wq, &wait);
				free_extent_state(state);
				goto search_again;
			}
			state->state |= EXTENT_LOCKED;
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);
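
/*
 * Illustrative usage sketch (not part of the original file): lock_range()
 * and unlock_range() below are meant as a pair, taking the page locks
 * before the extent lock as the comment above requires:
 *
 *	ret = lock_range(tree, start, end);
 *	if (!ret) {
 *		... operate on the pages and the extent range ...
 *		unlock_range(tree, start, end);
 *	}
 */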
/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set. Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > end)
			break;

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);
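
/*
 * Illustrative usage sketch (not part of the original file): 'filled'
 * selects between "every byte in the range has the bits" and "any byte
 * does", and the page helpers below rely on both modes:
 *
 *	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
 *		SetPageUptodate(page);		(whole range uptodate)
 *	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
 *		unlock_page(page);		(no byte still locked)
 */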
/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;

	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      bio_end_io_t end_io_func)
{
	struct bio *bio;
	int ret = 0;

	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_io_vec[0].bv_page = page;
	bio->bi_io_vec[0].bv_len = size;
	bio->bi_io_vec[0].bv_offset = offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = size;

	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
		page_cache_get(page);
	}
}

/*
 * basic readpage implementation. Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			iosize = PAGE_CACHE_SIZE - page_offset;
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(extent_read_full_page);
/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 iosize;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);

		zero_user_page(page, offset,
			       PAGE_CACHE_SIZE - offset, KM_USER0);
	}

	set_page_extent_mapped(page);

	lock_extent(tree, start, page_end, GFP_NOFS);
	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
					       &delalloc_end,
					       128 * 1024 * 1024);
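	/*
	 * fill_delalloc is expected to allocate on-disk extents for the
	 * locked delalloc range; any part of that range beyond this page
	 * is unlocked again below so other writers can make progress.
	 */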
	if (nr_delalloc) {
		tree->ops->fill_delalloc(inode, start, delalloc_end);
		if (delalloc_end >= page_end + 1) {
			clear_extent_bit(tree, page_end + 1, delalloc_end,
					 EXTENT_LOCKED | EXTENT_DELALLOC,
					 1, 0, GFP_NOFS);
		}
		clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
				 0, 0, GFP_NOFS);
		if (test_range_bit(tree, start, page_end,
				   EXTENT_DELALLOC, 0)) {
			printk("found delalloc bits after clear extent_bit\n");
		}
	} else if (test_range_bit(tree, start, page_end,
				  EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after find_delalloc_range returns 0\n");
	}

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 1);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
							   cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret) {
			SetPageError(page);
		} else {
			set_range_writeback(tree, cur, cur + iosize - 1);
			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}
EXPORT_SYMBOL(extent_write_full_page);
/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_map_tree *tree,
			  struct page *page, unsigned long offset)
{
	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;

	lock_extent(tree, start, end, GFP_NOFS);
	wait_on_extent_writeback(tree, start, end);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
			 1, 1, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(extent_invalidatepage);
/*
 * simple commit_write call, set_range_dirty is used to mark both
 * the pages and the extent records as dirty
 */
int extent_commit_write(struct extent_map_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(extent_commit_write);
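
/*
 * prepare the byte range [from, to) of this page for writing.  Blocks
 * fully covered by the write don't need to be read; partially covered
 * blocks are either zeroed (if freshly allocated) or read in, and the
 * wait at the end makes sure any reads we started have completed before
 * we return.
 */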
int extent_prepare_write(struct extent_map_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end, 1);
		if (IS_ERR(em) || !em) {
			err = -EIO;
			goto err;
		}
		cur_end = min(block_end, em->end);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (!isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;

			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize - 1) &
				 ~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 em->bdev,
						 end_bio_extent_preparewrite);
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}
EXPORT_SYMBOL(extent_prepare_write);
/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	u64 orig_start = start;
	int ret = 1;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		if (!test_range_bit(tree, em->start, em->end,
				    EXTENT_LOCKED, 0)) {
			remove_extent_mapping(tree, em);
			/* once for the rb tree */
			free_extent_map(em);
		}
		start = em->end + 1;
		/* once for us */
		free_extent_map(em);
	}
	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
		ret = 0;
	else
		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
				 1, 1, GFP_NOFS);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_mapping);
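
/*
 * bmap helper: map a logical file block number to a physical block
 * number.  Holes and inline extents have no physical block, so they
 * map to 0.
 */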
sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	u64 start = (u64)iblock << inode->i_blkbits;
	u64 end = start + (1 << inode->i_blkbits) - 1;
	sector_t sector = 0;
	struct extent_map *em;

	em = get_extent(inode, NULL, 0, start, end, 0);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start == EXTENT_MAP_INLINE ||
	    em->block_start == EXTENT_MAP_HOLE)
		goto out;

	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
out:
	free_extent_map(em);
	return sector;
}
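
/*
 * small LRU of recently used extent_buffers, capped at BUFFER_LRU_MAX
 * entries per tree.  The list holds its own reference on each buffer;
 * callers of add_lru/find_lru must hold tree->lru_lock.
 */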
static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
{
	if (list_empty(&eb->lru)) {
		extent_buffer_get(eb);
		list_add(&eb->lru, &tree->buffer_lru);
		tree->lru_size++;
		if (tree->lru_size >= BUFFER_LRU_MAX) {
			struct extent_buffer *rm;

			rm = list_entry(tree->buffer_lru.prev,
					struct extent_buffer, lru);
			tree->lru_size--;
			list_del(&rm->lru);
			free_extent_buffer(rm);
		}
	} else {
		list_move(&eb->lru, &tree->buffer_lru);
	}
	return 0;
}
static struct extent_buffer *find_lru(struct extent_map_tree *tree,
				      u64 start, unsigned long len)
{
	struct list_head *lru = &tree->buffer_lru;
	struct list_head *cur = lru->next;
	struct extent_buffer *eb;

	if (list_empty(lru))
		return NULL;

	do {
		eb = list_entry(cur, struct extent_buffer, lru);
		if (eb->start == start && eb->len == len) {
			extent_buffer_get(eb);
			return eb;
		}
		cur = cur->next;
	} while (cur != lru);
	return NULL;
}
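
/*
 * an extent_buffer may span several pages.  Only the first page is
 * stored directly in the buffer; the others are looked up in the page
 * cache radix tree relative to the buffer's starting page index.
 */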
static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}

static inline struct page *extent_buffer_page(struct extent_buffer *eb,
					      unsigned long i)
{
	struct page *p;
	struct address_space *mapping;

	if (i == 0)
		return eb->first_page;
	i += eb->start >> PAGE_CACHE_SHIFT;
	mapping = eb->first_page->mapping;
	read_lock_irq(&mapping->tree_lock);
	p = radix_tree_lookup(&mapping->page_tree, i);
	read_unlock_irq(&mapping->tree_lock);
	return p;
}
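
/*
 * allocate a new extent_buffer, or reuse one cached on the tree's LRU
 * if it covers exactly the same [start, start + len) range.  Either
 * way the buffer ends up at the head of the LRU.
 */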
static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;

	spin_lock(&tree->lru_lock);
	eb = find_lru(tree, start, len);
	if (eb)
		goto lru_add;
	spin_unlock(&tree->lru_lock);
	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
	INIT_LIST_HEAD(&eb->lru);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	spin_lock(&tree->lru_lock);
lru_add:
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;
}
static void __free_extent_buffer(struct extent_buffer *eb)
{
	kmem_cache_free(extent_buffer_cache, eb);
}
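
/*
 * return an extent_buffer covering [start, start + len), creating the
 * backing pages as needed.  page0 lets the caller pass in a page it
 * already holds locked to be used as first_page.  A buffer that came
 * back from the LRU fully filled is returned as-is.
 *
 * A minimal usage sketch (hypothetical caller, error handling omitted):
 *
 *	eb = alloc_extent_buffer(tree, start, blocksize, NULL, GFP_NOFS);
 *	read_extent_buffer_pages(tree, eb, 0, 1);
 *	...
 *	free_extent_buffer(eb);
 */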
struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
					  u64 start, unsigned long len,
					  struct page *page0,
					  gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		return eb;

	if (page0) {
		eb->first_page = page0;
		i = 1;
		index++;
		page_cache_get(page0);
		mark_page_accessed(page0);
		set_page_extent_mapped(page0);
		set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
				 len << 2);
	} else {
		i = 0;
	}
	for (; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			WARN_ON(1);
			/*
			 * make sure the free only frees the pages we've
			 * grabbed a reference on
			 */
			eb->len = i << PAGE_CACHE_SHIFT;
			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);

		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}

		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;
	return eb;

fail:
	free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(alloc_extent_buffer);
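
/*
 * like alloc_extent_buffer, but only succeeds if every backing page is
 * already present in the page cache (find_lock_page never allocates).
 */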
struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		return eb;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_lock_page(mapping, index);
		if (!p) {
			/*
			 * make sure the free only frees the pages we've
			 * grabbed a reference on
			 */
			eb->len = i << PAGE_CACHE_SHIFT;
			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);

		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}

		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;
	return eb;

fail:
	free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);
void free_extent_buffer(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page_cache_release(extent_buffer_page(eb, i));
	}
	__free_extent_buffer(eb);
}
EXPORT_SYMBOL(free_extent_buffer);

int clear_extent_buffer_dirty(struct extent_map_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;
	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean page that is partially dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);

int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);

int set_extent_buffer_dirty(struct extent_map_tree *tree,
			    struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *page = extent_buffer_page(eb, i);
		/*
		 * writepage may need to do something special for the
		 * first page, we have to make sure page->private is
		 * properly set.  releasepage may drop page->private
		 * on us if the page isn't already dirty.
		 */
		if (i == 0) {
			lock_page(page);
			set_page_private(page,
					 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 eb->len << 2);
		}
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
		if (i == 0)
			unlock_page(page);
	}
	return set_extent_dirty(tree, eb->start,
				eb->start + eb->len - 1, GFP_NOFS);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);

int set_extent_buffer_uptodate(struct extent_map_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);

int extent_buffer_uptodate(struct extent_map_tree *tree,
			   struct extent_buffer *eb)
{
	if (eb->flags & EXTENT_UPTODATE)
		return 1;
	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			      EXTENT_UPTODATE, 1);
}
EXPORT_SYMBOL(extent_buffer_uptodate);
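
/*
 * read any pages of the buffer that aren't uptodate yet.  With wait == 0
 * this acts as readahead: pages that are already locked are skipped and
 * we return without waiting.  With wait != 0 we block until every page
 * has been read, and return -EIO if any of them failed.
 */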
int read_extent_buffer_pages(struct extent_map_tree *tree,
			     struct extent_buffer *eb,
			     u64 start,
			     int wait)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	unsigned long num_pages;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;

	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
				EXTENT_UPTODATE, 1)) {
		return 0;
	}

	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			  (eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (PageUptodate(page))
			continue;

		if (!wait) {
			if (TestSetPageLocked(page))
				continue;
		} else {
			lock_page(page);
		}
		if (!PageUptodate(page)) {
			err = page->mapping->a_ops->readpage(NULL, page);
			if (err)
				ret = err;
		} else {
			unlock_page(page);
		}
	}

	if (ret || !wait)
		return ret;

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			ret = -EIO;
	}
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);
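
/*
 * copy len bytes starting at offset start (relative to the start of the
 * buffer) into dstv, crossing page boundaries as needed.  The pages are
 * expected to be uptodate already.
 *
 * Example (hypothetical caller):
 *
 *	u64 blocknr;
 *	read_extent_buffer(eb, &blocknr, offset_of_field, sizeof(blocknr));
 */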
void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long num_pages = num_extent_pages(eb->start, eb->len);

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);
	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			printk("page %lu not up to date i %lu, total %lu, len %lu\n",
			       page->index, i, num_pages, eb->len);
			WARN_ON(1);
		}

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);
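
/*
 * map a contiguous chunk of the buffer directly, without copying.  The
 * requested [start, start + min_len) range must not cross a page
 * boundary; on success *map points at the mapped bytes and *token is
 * what must later be handed to unmap_extent_buffer.
 */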
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **token, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
			      PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
	}

	if (start + min_len > eb->len) {
		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
		       eb->start, eb->len, start, min_len);
		WARN_ON(1);
	}

	p = extent_buffer_page(eb, i);
	WARN_ON(!PageUptodate(p));
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);
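
/*
 * cached version of map_private_extent_buffer: if a mapping was already
 * stored in the eb it is dropped first and, on success, replaced by the
 * new one so it can be reused.
 */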
int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;

	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = map_private_extent_buffer(eb, start, min_len, token, map,
					map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);
	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);
	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);
void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->len);
	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		 ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);
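
/*
 * page-level helpers for memcpy/memmove within extent buffers.
 * move_pages tolerates overlapping ranges (it uses memmove within a
 * single page and copies byte-by-byte from the end otherwise), while
 * copy_pages is a straight memcpy for non-overlapping ranges.
 */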
static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);

	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}
static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;
	if (src_offset + len > dst->len) {
		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE -
					    dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);
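
/*
 * like memcpy_extent_buffer, but safe for overlapping ranges.  When the
 * destination is above the source, the copy walks backwards from the
 * end of both ranges (via move_pages) so no byte is overwritten before
 * it has been copied.
 */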
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);