/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
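
/*
 * One bitmap entry carries one page worth of bits, each bit covering one
 * allocation unit (the block group's sectorsize), and the cache aims to
 * spend at most 32K of tracking memory per 1GB of space (see
 * recalculate_thresholds() below).
 */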
#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	inode->i_mapping->flags &= ~__GFP_FS;

	return inode;
}
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		printk(KERN_INFO "Old style space inode found, converting.\n");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
int __create_free_space_inode(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path, u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}
int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	struct btrfs_block_rsv *rsv;
	u64 needed_bytes;
	loff_t oldsize;
	int ret = 0;

	rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->global_block_rsv;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
		btrfs_calc_trans_metadata_size(root, 1);

	spin_lock(&trans->block_rsv->lock);
	if (trans->block_rsv->reserved < needed_bytes) {
		spin_unlock(&trans->block_rsv->lock);
		trans->block_rsv = rsv;
		return -ENOSPC;
	}
	spin_unlock(&trans->block_rsv->lock);

	oldsize = i_size_read(inode);
	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, oldsize, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret) {
		trans->block_rsv = rsv;
		WARN_ON(1);
		return ret;
	}

	ret = btrfs_update_inode(trans, root, inode);
	trans->block_rsv = rsv;

	return ret;
}
static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}
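
/*
 * An io_ctl walks the cache inode's pages sequentially, mapping one page at
 * a time (cur/orig point into the currently mapped page, size is the space
 * left on it).  When check_crcs is set, the front of the first page holds
 * one u32 checksum per page of the cache file.
 */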
struct io_ctl {
	void *cur, *orig;
	struct page *page;
	struct page **pages;
	struct btrfs_root *root;
	unsigned long size;
	int index;
	int num_pages;
	unsigned check_crcs:1;
};
static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root)
{
	memset(io_ctl, 0, sizeof(struct io_ctl));
	io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;
	io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
				GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;
	io_ctl->root = root;
	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		io_ctl->check_crcs = 1;
	return 0;
}
static void io_ctl_free(struct io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
}

static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		kunmap(io_ctl->page);
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
	WARN_ON(io_ctl->cur);
	BUG_ON(io_ctl->index >= io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = kmap(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_CACHE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		ClearPageChecked(io_ctl->pages[i]);
		unlock_page(io_ctl->pages[i]);
		page_cache_release(io_ctl->pages[i]);
	}
}
static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache\n");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	return 0;
}
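
/*
 * On-disk layout of the first page, as read and written below: when
 * check_crcs is set it begins with num_pages u32 checksums (one per page),
 * followed by the le64 transaction generation; without crcs there is just
 * the le64 generation at the front.
 */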
static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
{
	u64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
	u64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		printk_ratelimited(KERN_ERR "btrfs: space cache generation "
				   "(%Lu) does not match inode (%Lu)\n", *gen,
				   generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}
static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
	kunmap(io_ctl->pages[0]);
}

static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;
	kunmap(io_ctl->pages[0]);

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	if (val != crc) {
		printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
				   "space cache\n");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}
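
/*
 * Entries are packed back to back as struct btrfs_free_space_entry records;
 * once less than one record of space remains on the current page, the page
 * is checksummed and the stream advances to the next page.  A bitmap
 * entry's payload is always a full page of its own, added after all the
 * entry records.
 */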
static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}
static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	return io_ctl_check_crc(io_ctl, io_ctl->index);
}

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	if (io_ctl->cur && io_ctl->cur != io_ctl->orig)
		io_ctl_unmap_page(io_ctl);

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}
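
/*
 * Loading replays the stream written by __btrfs_write_out_cache(): check
 * the header crc and generation, link each extent entry into the ctl as it
 * is read, and queue bitmap entries so their page-sized payloads (which
 * follow all the entry records) can be copied in afterwards.
 */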
int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	INIT_LIST_HEAD(&bitmaps);

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		       " not match free space cache generation (%llu)\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	io_ctl_init(&io_ctl, inode, root);
	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				printk(KERN_ERR "Duplicate entries in "
				       "free space cache, dumping\n");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			BUG_ON(!num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				printk(KERN_ERR "Duplicate entries in "
				       "free space cache, dumping\n");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	/*
	 * The bitmap payloads follow all the entry records on disk, in the
	 * same order that the bitmap entries were added to the cache, so
	 * read them back in that order.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If we're unmounting then just return, since this does a search on
	 * the normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		printk(KERN_ERR "block group %llu has a wrong amount of free "
		       "space\n", block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		printk(KERN_ERR "btrfs: failed to load free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick
 * recovery on mount.  This will return 0 if it was successful in writing the
 * cache out, and -1 if it was not.
 */
int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct extent_state *cached_state = NULL;
	struct btrfs_free_cluster *cluster = NULL;
	struct extent_io_tree *unpin = NULL;
	struct io_ctl io_ctl;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 start, end, len;
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int err = -1;

	INIT_LIST_HEAD(&bitmap_list);

	if (!i_size_read(inode))
		return -1;

	io_ctl_init(&io_ctl, inode, root);

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);

	/*
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	/* Lock all pages first so we can lock the extent safely. */
	io_ctl_prepare_pages(&io_ctl, inode, 0);

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state, GFP_NOFS);

	/*
	 * When searching for pinned extents, we need to start at our start
	 * offset.
	 */
	if (block_group)
		start = block_group->key.objectid;

	node = rb_first(&ctl->free_space_offset);
	if (!node && cluster) {
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Make sure we can fit our crcs into the first page */
	if (io_ctl.check_crcs &&
	    (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
		WARN_ON(1);
		goto out_nospc;
	}

	io_ctl_set_generation(&io_ctl, trans->transid);

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		entries++;

		ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto out_nospc;

		if (e->bitmap) {
			list_add_tail(&e->list, &bitmap_list);
			bitmaps++;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster = NULL;
		}
	}

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 */
	while (block_group && (start < block_group->key.objectid +
			       block_group->key.offset)) {
		ret = find_first_extent_bit(unpin, start, &start, &end,
					    EXTENT_DIRTY);
		if (ret) {
			ret = 0;
			break;
		}

		/* This pinned extent is out of our range */
		if (start >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		len = block_group->key.objectid +
			block_group->key.offset - start;
		len = min(len, end + 1 - start);

		entries++;
		ret = io_ctl_add_entry(&io_ctl, start, len, NULL);
		if (ret)
			goto out_nospc;

		start = end + 1;
	}

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
		if (ret)
			goto out_nospc;
		list_del_init(&entry->list);
	}

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(&io_ctl);

	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
				0, i_size_read(inode), &cached_state);
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	if (ret)
		goto out;

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret)
		goto out;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto out;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		BUG_ON(!path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto out;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	err = 0;
out:
	io_ctl_free(&io_ctl);
	if (err) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	return err;

out_nospc:
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);
		list_del_init(&entry->list);
	}
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
	goto out;
}
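
/*
 * Transaction-commit entry point: only block groups whose cache was set up
 * this transaction (disk_cache_state >= BTRFS_DC_SETUP) get written; on
 * failure the group is downgraded to BTRFS_DC_ERROR and the commit carries
 * on without the cache.
 */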
int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		ret = 0;
#ifdef DEBUG
		printk(KERN_ERR "btrfs: failed to write free space cache "
		       "for block group %llu\n", block_group->key.objectid);
#endif
	}

	iput(inode);
	return ret;
}
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
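
/*
 * For illustration, assuming 4K pages and a 4K unit: one bitmap covers
 * BITS_PER_BITMAP * unit = 32768 * 4096 bytes = 128MiB, so with
 * ctl->start == 0 an offset of 200MiB rounds down to the bitmap starting
 * at 128MiB, at bit (200MiB - 128MiB) / 4096 within it.
 */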
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to
			 * have the quickest allocation time, and allocating
			 * from an extent is faster than allocating from a
			 * bitmap.  So if we're inserting a bitmap and we find
			 * an entry at this offset, we want to go right, or
			 * after this entry logically.  If we are inserting an
			 * extent and we've found a bitmap, we want to go left,
			 * or before logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
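
/*
 * Concretely: if an extent entry at offset X and a bitmap entry whose range
 * starts at X both exist, an in-order walk visits the extent first and the
 * bitmap second, matching the allocation preference described above.
 */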
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we
 * just want a section that has at least bytes size and comes at or after the
 * given offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = &entry->offset_index;
			while (1) {
				n = rb_prev(n);
				if (!n)
					break;
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap) {
					if (prev->offset + prev->bytes > offset)
						entry = prev;
					break;
				}
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = &entry->offset_index;
		while (1) {
			n = rb_prev(n);
			if (!n)
				break;
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap) {
				if (prev->offset + prev->bytes > offset)
					return prev;
				break;
			}
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}
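
/*
 * Recompute extents_thresh, the number of plain extent entries this ctl may
 * hold before new free space is pushed into bitmaps instead.  The counters
 * it reads (total_bitmaps, free_extents) are maintained under
 * ctl->tree_lock.
 */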
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	BUG_ON(ctl->total_bitmaps > max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can
	 * make sure we don't go over our overall goal of
	 * MAX_CACHE_BYTES_PER_GIG as we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
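
/*
 * Worked example (illustrative, assuming 4K pages): for a 1GiB block group,
 * max_bytes = 32K.  With no bitmaps yet, bitmap_bytes = 1 * 4K, so
 * extent_bytes = min(28K, 16K) = 16K, and extents_thresh is 16K divided by
 * sizeof(struct btrfs_free_space).
 */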
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	return -1;
}
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		return NULL;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes)
			continue;

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, offset, bytes);
			if (!ret)
				return entry;
			continue;
		}

		*offset = entry->offset;
		*bytes = entry->bytes;
		return entry;
	}

	return NULL;
}
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *bitmap_info,
				       u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * XXX - this can go away after a few releases.
	 *
	 * since the only user of btrfs_remove_free_space is the tree logging
	 * stuff, and the only way to test that is under crash conditions, we
	 * want to have this debug stuff here just in case something's not
	 * working.  Search the bitmap for the space we are trying to use to
	 * make sure it's actually there.  If it's not there then we need to
	 * stop because something has gone wrong.
	 */
	search_start = *offset;
	search_bytes = *bytes;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	BUG_ON(ret < 0 || search_start != *offset);

	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
		*bytes -= end - *offset + 1;
		*offset = end + 1;
	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
		*bytes = 0;
	}

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let
		 * the extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = *bytes;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}
static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	return bytes_to_set;
}
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them for larger extents.  However, if we have
		 * plenty of cache left then go ahead and add them; no sense in
		 * adding the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * some block groups are so tiny they can't be enveloped by a bitmap,
	 * so don't even bother to create a bitmap for this
	 */
	if (BITS_PER_BITMAP * block_group->sectorsize >
	    block_group->key.offset)
		return false;

	return true;
}
static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

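/*
 * Try to store 'info' in bitmap form.  Returns 0 when the space should stay
 * an extent entry (use_bitmap said no), 1 when it was fully added to one or
 * more bitmaps, and -ENOMEM on allocation failure.  Note that the allocation
 * path below temporarily drops ctl->tree_lock.
 */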
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		BUG_ON(added);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}

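/*
 * Merge 'info' with any extent entries immediately to its left or right.
 * The neighbours are unlinked and folded into 'info', which the caller still
 * has to link in; returns true if any merge happened.  'update_stat' chooses
 * between the stat-updating and raw unlink helpers.
 */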
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
				 struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}

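/*
 * Core insert path: allocate an entry for [offset, offset + bytes), merge it
 * with adjacent extents if possible, otherwise let insert_into_bitmap()
 * decide whether it belongs in a bitmap, and finally link whatever is left
 * into the tree.
 */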
int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * If there was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space: %d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}

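/*
 * Remove [offset, offset + bytes) from a block group's free space cache.
 * The range may match an extent entry exactly, clip its head, punch a hole
 * in its middle (re-inserting the leading piece), or fall inside one or more
 * bitmaps, in which case remove_from_bitmap() does the work and may ask us
 * to start over with -EAGAIN.
 */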
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct btrfs_free_space *next_info = NULL;
	int ret = 0;

	spin_lock(&ctl->tree_lock);

again:
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			WARN_ON(1);
			goto out_lock;
		}
	}

	if (info->bytes < bytes && rb_next(&info->offset_index)) {
		u64 end;
		next_info = rb_entry(rb_next(&info->offset_index),
				     struct btrfs_free_space,
				     offset_index);

		if (next_info->bitmap)
			end = next_info->offset +
			      BITS_PER_BITMAP * ctl->unit - 1;
		else
			end = next_info->offset + next_info->bytes;

		if (next_info->bytes < bytes ||
		    next_info->offset > offset || offset > end) {
			printk(KERN_CRIT "Found free space at %llu, size %llu,"
			       " trying to use %llu\n",
			       (unsigned long long)info->offset,
			       (unsigned long long)info->bytes,
			       (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			goto out_lock;
		}

		info = next_info;
	}

	if (info->bytes == bytes) {
		unlink_free_space(ctl, info);
		if (info->bitmap) {
			kfree(info->bitmap);
			ctl->total_bitmaps--;
		}
		kmem_cache_free(btrfs_free_space_cachep, info);
		ret = 0;
		goto out_lock;
	}

	if (!info->bitmap && info->offset == offset) {
		unlink_free_space(ctl, info);
		info->offset += bytes;
		info->bytes -= bytes;
		ret = link_free_space(ctl, info);
		WARN_ON(ret);
		goto out_lock;
	}

	if (!info->bitmap && info->offset <= offset &&
	    info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
		unlink_free_space(ctl, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;

			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;
		} else {
			/* the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kmem_cache_free(btrfs_free_space_cachep, info);
		}
		spin_unlock(&ctl->tree_lock);

		/* step two, insert a new info struct to cover
		 * anything before the hole
		 */
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		WARN_ON(ret);
		goto out;
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN)
		goto again;
	BUG_ON(ret);
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}

void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	printk(KERN_INFO "block group has cluster?: %s\n",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes\n",
	       count);
}

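/*
 * The threshold below budgets 32k of entries per block group and flips to
 * bitmaps once half of that is used.  As a rough illustration only (the
 * exact figure depends on sizeof(struct btrfs_free_space) for the build):
 * with a 64-byte entry this works out to (32768 / 2) / 64 = 256 extent
 * entries before use_bitmap() starts steering small extents into bitmaps.
 */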
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = ((1024 * 32) / 2) /
				sizeof(struct btrfs_free_space);
}

/*
 * for a given cluster, put all of its extents back into the free
 * space cache. If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already. In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap)
			try_merge_free_space(ctl, entry, false);
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}

void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

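/*
 * Search for at least 'bytes + empty_size' of contiguous space at or after
 * 'offset', but consume only 'bytes' of it.  Returns the start of the
 * allocation, or 0 if nothing large enough was found; the claimed range is
 * removed from whichever entry satisfied it, bitmap or extent.
 */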
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		entry->offset += bytes;
		entry->bytes -= bytes;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}

out:
	spin_unlock(&ctl->tree_lock);

	return ret;
}

/*
 * given a cluster, put all of its extents back into the free space
 * cache. If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}

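/*
 * Carve 'bytes' out of a bitmap entry that already belongs to a cluster.
 * The search is bounded below by min_start; returns the start of the
 * allocation, or 0 when the bitmap has no large-enough run.
 */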
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = min_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
	if (err)
		return 0;

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      min_start);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}

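/*
 * Try to build the cluster out of a single bitmap entry: scan for runs of
 * set bits at least 'bytes' long, accumulating them until min_bytes worth
 * sit inside one window, then move the entry from the free space tree into
 * the cluster's rbtree.
 */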
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long search_bits;
	unsigned long total_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;
	bool found = false;

	i = offset_to_bit(entry->offset, block_group->sectorsize,
			  max_t(u64, offset, entry->offset));
	search_bits = bytes_to_bits(bytes, block_group->sectorsize);
	total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);

again:
	found_bits = 0;
	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= search_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -ENOSPC;

	if (!found) {
		start = i;
		found = true;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * block_group->sectorsize)
		cluster->max_size = found_bits * block_group->sectorsize;

	if (total_found < total_bits) {
		i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
		if (i - start > total_bits * 2) {
			total_found = 0;
			cluster->max_size = 0;
			found = false;
		}
		goto again;
	}

	cluster->window_start = start * block_group->sectorsize +
		entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	BUG_ON(ret);

	return 0;
}

/*
 * This searches the block group for just extents to fill the cluster with.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *prev = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_start;
	u64 window_free;
	u64 max_extent;
	u64 max_gap = 128 * 1024;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap) {
		if (list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_start = entry->offset;
	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;
	prev = entry;

	while (window_free <= min_bytes) {
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		/*
		 * we haven't filled the empty size and the window is
		 * very large. reset and try again
		 */
		if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
		    entry->offset - window_start > (min_bytes * 2)) {
			first = entry;
			window_start = entry->offset;
			window_free = entry->bytes;
			last = entry;
			max_extent = entry->bytes;
		} else {
			last = entry;
			window_free += entry->bytes;
			if (entry->bytes > max_extent)
				max_extent = entry->bytes;
		}
		prev = entry;
	}

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		BUG_ON(ret);
	} while (node && entry != last);

	cluster->max_size = max_extent;

	return 0;
}

/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = -ENOSPC;

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * First check our cached list of bitmaps and see if there is an entry
	 * here that will work.
	 */
	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < min_bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * If we do have entries on our list and we are here then we didn't find
	 * anything, so go ahead and get the next entry after the last entry in
	 * this list and start the search from there.
	 */
	if (!list_empty(bitmaps)) {
		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
				   list);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		goto search;
	}

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
	if (!entry)
		return -ENOSPC;

search:
	node = &entry->offset_index;
	do {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (!entry->bitmap)
			continue;
		if (entry->bytes < min_bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, min_bytes);
	} while (ret && node);

	return ret;
}

/*
 * here we try to find a cluster of blocks in a block group. The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct list_head bitmaps;
	struct btrfs_free_space *entry, *tmp;
	u64 min_bytes;
	int ret;

	/* for metadata, allow allocations with more holes */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * we want to do larger allocations when we are
		 * flushing out the delayed refs, it helps prevent
		 * making more work as we go along.
		 */
		if (trans->transaction->delayed_refs.flushing)
			min_bytes = max(bytes, (bytes + empty_size) >> 1);
		else
			min_bytes = max(bytes, (bytes + empty_size) >> 4);
	} else
		min_bytes = max(bytes, (bytes + empty_size) >> 2);

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < min_bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	INIT_LIST_HEAD(&bitmaps);
	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}

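/*
 * Discard (trim) every free range in [start, end) that is at least minlen
 * bytes.  Each range is pulled out of the cache, handed to
 * btrfs_error_discard_extent(), and then re-added; *trimmed accumulates the
 * bytes the device reported as actually discarded.
 */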
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 bytes = 0;
	u64 actually_trimmed;
	int ret = 0;

	*trimmed = 0;

	while (start < end) {
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry)
			entry = tree_search_offset(ctl,
						   offset_to_bitmap(ctl, start),
						   1, 1);

		if (!entry || entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, &start, &bytes);
			if (!ret) {
				if (start >= end) {
					spin_unlock(&ctl->tree_lock);
					break;
				}
				bytes = min(bytes, end - start);
				bitmap_clear_bits(ctl, entry, start, bytes);
				if (entry->bytes == 0)
					free_bitmap(ctl, entry);
			} else {
				start = entry->offset + BITS_PER_BITMAP *
					block_group->sectorsize;
				spin_unlock(&ctl->tree_lock);
				ret = 0;
				continue;
			}
		} else {
			start = entry->offset;
			bytes = min(entry->bytes, end - start);
			unlink_free_space(ctl, entry);
			kmem_cache_free(btrfs_free_space_cachep, entry);
		}

		spin_unlock(&ctl->tree_lock);

		if (bytes >= minlen) {
			struct btrfs_space_info *space_info;
			int update = 0;

			space_info = block_group->space_info;
			spin_lock(&space_info->lock);
			spin_lock(&block_group->lock);
			if (!block_group->ro) {
				block_group->reserved += bytes;
				space_info->bytes_reserved += bytes;
				update = 1;
			}
			spin_unlock(&block_group->lock);
			spin_unlock(&space_info->lock);

			ret = btrfs_error_discard_extent(fs_info->extent_root,
							 start,
							 bytes,
							 &actually_trimmed);

			btrfs_add_free_space(block_group, start, bytes);
			if (update) {
				spin_lock(&space_info->lock);
				spin_lock(&block_group->lock);
				if (block_group->ro)
					space_info->bytes_readonly += bytes;
				block_group->reserved -= bytes;
				space_info->bytes_reserved -= bytes;
				spin_unlock(&space_info->lock);
				spin_unlock(&block_group->lock);
			}

			if (ret)
				break;
			*trimmed += actually_trimmed;
		}
		start += bytes;
		bytes = 0;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}

/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		BUG_ON(ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}

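/*
 * Return the cached free-ino inode for this root, taking a reference, or
 * look it up on disk and cache it if the filesystem isn't shutting down.
 */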
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->cache_lock);
	if (root->cache_inode)
		inode = igrab(root->cache_inode);
	spin_unlock(&root->cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->cache_inode = igrab(inode);
	spin_unlock(&root->cache_lock);

	return inode;
}

int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}

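/*
 * Load the persisted free-ino cache for 'root', but only when the cache is
 * enabled, the filesystem isn't closing, and the on-disk cache generation
 * matches the root's.
 */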
int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		printk(KERN_ERR "btrfs: failed to load free ino cache for "
		       "root %llu\n", root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct inode *inode;
	int ret;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
	if (ret) {
		btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
		printk(KERN_ERR "btrfs: failed to write free ino cache "
		       "for root %llu\n", root->root_key.objectid);
#endif
	}

	iput(inode);
	return ret;
}