inode.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/version.h>
#include <linux/xattr.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static struct inode_operations btrfs_dir_inode_operations;
static struct inode_operations btrfs_symlink_inode_operations;
static struct inode_operations btrfs_dir_ro_inode_operations;
static struct inode_operations btrfs_special_inode_operations;
static struct inode_operations btrfs_file_inode_operations;
static struct address_space_operations btrfs_aops;
static struct address_space_operations btrfs_symlink_aops;
static struct file_operations btrfs_dir_file_operations;
static struct extent_map_ops btrfs_extent_map_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_bit_radix_cachep;
struct kmem_cache *btrfs_path_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
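
/*
 * Refuse an operation when the FS is getting full: deletes may use up to
 * 90% of the total bytes, everything else up to 85%.  Outstanding
 * delalloc bytes count against the threshold as well.
 */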
int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
			   int for_del)
{
	u64 total = btrfs_super_total_bytes(&root->fs_info->super_copy);
	u64 used = btrfs_super_bytes_used(&root->fs_info->super_copy);
	u64 thresh;
	int ret = 0;

	if (for_del)
		thresh = total * 90;
	else
		thresh = total * 85;

	do_div(thresh, 100);

	spin_lock(&root->fs_info->delalloc_lock);
	if (used + root->fs_info->delalloc_bytes + num_required > thresh)
		ret = -ENOSPC;
	spin_unlock(&root->fs_info->delalloc_lock);
	return ret;
}
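
/*
 * Allocate new extents to back the dirty range [start, end] and insert
 * the matching file extent items, chunking each allocation at
 * fs_info->max_extent.  Runs inside a single transaction.
 */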
static int cow_file_range(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	int ret;

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	ret = btrfs_drop_extents(trans, root, inode,
				 start, start + num_bytes, start, &alloc_hint);

	if (alloc_hint == EXTENT_MAP_INLINE)
		goto out;

	while(num_bytes > 0) {
		cur_alloc_size = min(num_bytes, root->fs_info->max_extent);
		ret = btrfs_alloc_extent(trans, root, cur_alloc_size,
					 root->root_key.objectid,
					 trans->transid,
					 inode->i_ino, start, 0,
					 alloc_hint, (u64)-1, &ins, 1);
		if (ret) {
			WARN_ON(1);
			goto out;
		}
		ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
					       start, ins.objectid, ins.offset,
					       ins.offset);
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
	btrfs_add_ordered_inode(inode);
out:
	btrfs_end_transaction(trans, root);
	return ret;
}
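
/*
 * When mounted with nodatacow, try to write the dirty range in place on
 * top of the existing on-disk extent instead of allocating a new one.
 * The extent must be a regular extent with a real disk bytenr, must sit
 * inside the current FS size (we may be called by the resizer) and must
 * be referenced by exactly one snapshot; any part of the range that does
 * not qualify is handed to cow_file_range().
 */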
static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end)
{
	u64 extent_start;
	u64 extent_end;
	u64 bytenr;
	u64 cow_end;
	u64 loops = 0;
	u64 total_fs_bytes;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	int found_type;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *item;
	int ret;
	int err;
	struct btrfs_key found_key;

	total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	ret = btrfs_lookup_file_extent(NULL, root, path,
				       inode->i_ino, start, 0);
	if (ret < 0) {
		btrfs_free_path(path);
		return ret;
	}

	cow_end = end;
	if (ret != 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);

	/* are we inside the extent that was found? */
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	found_type = btrfs_key_type(&found_key);
	if (found_key.objectid != inode->i_ino ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		goto not_found;
	}

	found_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	if (found_type == BTRFS_FILE_EXTENT_REG) {
		u64 extent_num_bytes;

		extent_num_bytes = btrfs_file_extent_num_bytes(leaf, item);
		extent_end = extent_start + extent_num_bytes;
		err = 0;

		if (loops && start != extent_start)
			goto not_found;

		if (start < extent_start || start >= extent_end)
			goto not_found;

		cow_end = min(end, extent_end - 1);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
		if (bytenr == 0)
			goto not_found;

		/*
		 * we may be called by the resizer, make sure we're inside
		 * the limits of the FS
		 */
		if (bytenr + extent_num_bytes > total_fs_bytes)
			goto not_found;

		if (btrfs_count_snapshots_in_path(root, path, bytenr) != 1) {
			goto not_found;
		}

		start = extent_end;
	} else {
		goto not_found;
	}
loop:
	if (start > end) {
		btrfs_free_path(path);
		return 0;
	}

	btrfs_release_path(root, path);
	loops++;
	goto again;

not_found:
	cow_file_range(inode, start, cow_end);
	start = cow_end + 1;
	goto loop;
}
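
/*
 * Extent-io hook that flushes delayed allocation for [start, end]: pick
 * the nocow or cow path based on the mount options, then subtract the
 * range from the global delalloc byte counter.
 */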
static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes;
	int ret;

	mutex_lock(&root->fs_info->fs_mutex);
	if (btrfs_test_opt(root, NODATACOW))
		ret = run_delalloc_nocow(inode, start, end);
	else
		ret = cow_file_range(inode, start, end);

	spin_lock(&root->fs_info->delalloc_lock);
	num_bytes = end + 1 - start;
	if (root->fs_info->delalloc_bytes < num_bytes) {
		printk("delalloc accounting error total %llu sub %llu\n",
		       root->fs_info->delalloc_bytes, num_bytes);
	} else {
		root->fs_info->delalloc_bytes -= num_bytes;
	}
	spin_unlock(&root->fs_info->delalloc_lock);
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret;
}
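
/*
 * Writepage hook: checksum the data in [start, end] of the page and
 * record the csum items before the page goes to disk.  A no-op when
 * mounted with nodatasum.
 */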
int btrfs_writepage_io_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	char *kaddr;
	int ret = 0;
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	size_t offset = start - page_start;

	if (btrfs_test_opt(root, NODATASUM))
		return 0;

	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, inode);

	kaddr = kmap(page);
	btrfs_csum_file_block(trans, root, inode, inode->i_ino,
			      start, kaddr + offset, end - start + 1);
	kunmap(page);
	ret = btrfs_end_transaction(trans, root);
	BUG_ON(ret);
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret;
}

int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
{
	int ret = 0;
	struct inode *inode = page->mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_csum_item *item;
	struct btrfs_path *path = NULL;
	u32 csum;

	if (btrfs_test_opt(root, NODATASUM))
		return 0;

	mutex_lock(&root->fs_info->fs_mutex);
	path = btrfs_alloc_path();
	item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0);
	if (IS_ERR(item)) {
		ret = PTR_ERR(item);
		/* a csum that isn't present is a preallocated region. */
		if (ret == -ENOENT || ret == -EFBIG)
			ret = 0;
		csum = 0;
		goto out;
	}
	read_extent_buffer(path->nodes[0], &csum, (unsigned long)item,
			   BTRFS_CRC32_SIZE);
	set_state_private(em_tree, start, csum);
out:
	if (path)
		btrfs_free_path(path);
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret;
}
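
/*
 * Readpage end-io hook: recompute the checksum of the data that was just
 * read and compare it against the value stashed in the extent state by
 * btrfs_readpage_io_hook().  On a mismatch the page contents are
 * overwritten so stale data is never exposed.
 */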
int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	char *kaddr;
	u64 private;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;
	unsigned long flags;

	if (btrfs_test_opt(root, NODATASUM))
		return 0;

	ret = get_state_private(em_tree, start, &private);
	local_irq_save(flags);
	kaddr = kmap_atomic(page, KM_IRQ0);
	if (ret) {
		goto zeroit;
	}
	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private) {
		goto zeroit;
	}
	kunmap_atomic(kaddr, KM_IRQ0);
	local_irq_restore(flags);
	return 0;

zeroit:
	printk("btrfs csum failed ino %lu off %llu\n",
	       page->mapping->host->i_ino, (unsigned long long)start);
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_IRQ0);
	local_irq_restore(flags);
	return 0;
}
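
/*
 * Read an inode item from the tree and fill in the VFS inode, including
 * its block group hint and the operations appropriate for its file type.
 */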
void btrfs_read_locked_inode(struct inode *inode)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_inode_timespec *tspec;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	u64 alloc_group_block;
	u32 rdev;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	mutex_lock(&root->fs_info->fs_mutex);
	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret)
		goto make_bad;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
	inode->i_size = btrfs_inode_size(leaf, inode_item);

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	inode->i_blocks = btrfs_inode_nblocks(leaf, inode_item);
	inode->i_generation = btrfs_inode_generation(leaf, inode_item);
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
	BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info,
						       alloc_group_block);
	if (!BTRFS_I(inode)->block_group) {
		BTRFS_I(inode)->block_group = btrfs_find_block_group(root,
								NULL, 0, 0, 0);
	}
	btrfs_free_path(path);
	inode_item = NULL;

	mutex_unlock(&root->fs_info->fs_mutex);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		BTRFS_I(inode)->extent_tree.ops = &btrfs_extent_map_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		break;
	default:
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}
	return;

make_bad:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->fs_mutex);
	make_bad_inode(inode);
}
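
/*
 * Copy everything from the VFS inode into the on-disk inode item.
 */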
static void fill_inode_item(struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	btrfs_set_inode_gid(leaf, item, inode->i_gid);
	btrfs_set_inode_size(leaf, item, inode->i_size);
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
			       inode->i_atime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
				inode->i_atime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
			       inode->i_mtime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
				inode->i_mtime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
			       inode->i_ctime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
				inode->i_ctime.tv_nsec);

	btrfs_set_inode_nblocks(leaf, item, inode->i_blocks);
	btrfs_set_inode_generation(leaf, item, inode->i_generation);
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
	btrfs_set_inode_block_group(leaf, item,
				    BTRFS_I(inode)->block_group->key.objectid);
}

int btrfs_update_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_lookup_inode(trans, root, path,
				 &BTRFS_I(inode)->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(leaf, inode_item, inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	return ret;
}
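
/*
 * Remove a name from a directory inside an existing transaction: delete
 * the dir item, the matching dir index item and the inode backref, then
 * fix up the directory size/times and the victim's link count.
 */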
static int btrfs_unlink_trans(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct inode *dir,
			      struct dentry *dentry)
{
	struct btrfs_path *path;
	const char *name = dentry->d_name.name;
	int name_len = dentry->d_name.len;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto err;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(root, path);

	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
					 key.objectid, name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);

	dentry->d_inode->i_ctime = dir->i_ctime;
	ret = btrfs_del_inode_ref(trans, root, name, name_len,
				  dentry->d_inode->i_ino,
				  dentry->d_parent->d_inode->i_ino);
	if (ret) {
		printk("failed to delete reference to %.*s, "
		       "inode %lu parent %lu\n", name_len, name,
		       dentry->d_inode->i_ino,
		       dentry->d_parent->d_inode->i_ino);
	}
err:
	btrfs_free_path(path);
	if (!ret) {
		dir->i_size -= name_len * 2;
		dir->i_mtime = dir->i_ctime = CURRENT_TIME;
		btrfs_update_inode(trans, root, dir);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
		dentry->d_inode->i_nlink--;
#else
		drop_nlink(dentry->d_inode);
#endif
		ret = btrfs_update_inode(trans, root, dentry->d_inode);
		dir->i_sb->s_dirt = 1;
	}
	return ret;
}

static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr = 0;

	root = BTRFS_I(dir)->root;
	mutex_lock(&root->fs_info->fs_mutex);
	ret = btrfs_check_free_space(root, 1, 1);
	if (ret)
		goto fail;

	trans = btrfs_start_transaction(root, 1);

	btrfs_set_trans_block_group(trans, dir);
	ret = btrfs_unlink_trans(trans, root, dir, dentry);
	nr = trans->blocks_used;

	btrfs_end_transaction(trans, root);
fail:
	mutex_unlock(&root->fs_info->fs_mutex);
	btrfs_btree_balance_dirty(root, nr);
	btrfs_throttle(root);
	return ret;
}

static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;
	int ret;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	unsigned long nr = 0;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;

	mutex_lock(&root->fs_info->fs_mutex);
	ret = btrfs_check_free_space(root, 1, 1);
	if (ret)
		goto fail;

	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, dir);

	/* now the directory is empty */
	err = btrfs_unlink_trans(trans, root, dir, dentry);
	if (!err) {
		inode->i_size = 0;
	}

	nr = trans->blocks_used;
	ret = btrfs_end_transaction(trans, root);
fail:
	mutex_unlock(&root->fs_info->fs_mutex);
	btrfs_btree_balance_dirty(root, nr);
	btrfs_throttle(root);

	if (ret && !err)
		err = ret;
	return err;
}

static int btrfs_free_inode(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct inode *inode)
{
	struct btrfs_path *path;
	int ret;

	clear_inode(inode);

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_lookup_inode(trans, root, path,
				 &BTRFS_I(inode)->location, -1);
	if (ret > 0)
		ret = -ENOENT;
	if (!ret)
		ret = btrfs_del_item(trans, root, path);
	btrfs_free_path(path);
	return ret;
}

/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
 * any higher than i_size.
 *
 * csum items that cross the new i_size are truncated to the new size
 * as well.
 */
static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct inode *inode)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 found_type;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	u64 extent_start = 0;
	u64 extent_num_bytes = 0;
	u64 item_end = 0;
	u64 root_gen = 0;
	u64 root_owner = 0;
	int found_extent;
	int del_item;
	int extent_type = -1;

	btrfs_drop_extent_cache(inode, inode->i_size, (u64)-1);
	path = btrfs_alloc_path();
	path->reada = -1;
	BUG_ON(!path);

	/* FIXME, add redo link to tree so we don't leak on crash */
	key.objectid = inode->i_ino;
	key.offset = (u64)-1;
	key.type = (u8)-1;

	while(1) {
		btrfs_init_path(path);
		fi = NULL;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			goto error;
		}
		if (ret > 0) {
			BUG_ON(path->slots[0] == 0);
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = btrfs_key_type(&found_key);

		if (found_key.objectid != inode->i_ino)
			break;
		if (found_type != BTRFS_CSUM_ITEM_KEY &&
		    found_type != BTRFS_DIR_ITEM_KEY &&
		    found_type != BTRFS_DIR_INDEX_KEY &&
		    found_type != BTRFS_EXTENT_DATA_KEY)
			break;

		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
				item_end +=
				    btrfs_file_extent_num_bytes(leaf, fi);
			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				struct btrfs_item *item = btrfs_item_nr(leaf,
							       path->slots[0]);
				item_end += btrfs_file_extent_inline_len(leaf,
									 item);
			}
			item_end--;
		}
		if (found_type == BTRFS_CSUM_ITEM_KEY) {
			ret = btrfs_csum_truncate(trans, root, path,
						  inode->i_size);
			BUG_ON(ret);
		}
		if (item_end < inode->i_size) {
			if (found_type == BTRFS_DIR_ITEM_KEY) {
				found_type = BTRFS_INODE_ITEM_KEY;
			} else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
				found_type = BTRFS_CSUM_ITEM_KEY;
			} else if (found_type) {
				found_type--;
			} else {
				break;
			}
			btrfs_set_key_type(&key, found_type);
			btrfs_release_path(root, path);
			continue;
		}
		if (found_key.offset >= inode->i_size)
			del_item = 1;
		else
			del_item = 0;
		found_extent = 0;

		/* FIXME, shrink the extent if the ref count is only 1 */
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
			u64 num_dec;
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
			if (!del_item) {
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
				extent_num_bytes = inode->i_size -
					found_key.offset + root->sectorsize - 1;
				btrfs_set_file_extent_num_bytes(leaf, fi,
							 extent_num_bytes);
				num_dec = (orig_num_bytes -
					   extent_num_bytes) >> 9;
				if (extent_start != 0) {
					inode->i_blocks -= num_dec;
				}
				btrfs_mark_buffer_dirty(leaf);
			} else {
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf,
									 fi);
				/* FIXME blocksize != 4096 */
				num_dec = btrfs_file_extent_num_bytes(leaf,
								      fi) >> 9;
				if (extent_start != 0) {
					found_extent = 1;
					inode->i_blocks -= num_dec;
				}
				root_gen = btrfs_header_generation(leaf);
				root_owner = btrfs_header_owner(leaf);
			}
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE &&
			   !del_item) {
			u32 newsize = inode->i_size - found_key.offset;
			newsize = btrfs_file_extent_calc_inline_size(newsize);
			ret = btrfs_truncate_item(trans, root, path,
						  newsize, 1);
			BUG_ON(ret);
		}
delete:
		if (del_item) {
			ret = btrfs_del_item(trans, root, path);
			if (ret)
				goto error;
		} else {
			break;
		}
		btrfs_release_path(root, path);
		if (found_extent) {
			ret = btrfs_free_extent(trans, root, extent_start,
						extent_num_bytes,
						root_owner,
						root_gen, inode->i_ino,
						found_key.offset, 0);
			BUG_ON(ret);
		}
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	inode->i_sb->s_dirt = 1;
	return ret;
}
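
/*
 * Mark a single page as delalloc (updating the global delalloc byte
 * count) and zero the tail of the page from zero_start onwards before
 * dirtying it.
 */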
static int btrfs_cow_one_page(struct inode *inode, struct page *page,
			      size_t zero_start)
{
	char *kaddr;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 existing_delalloc;
	u64 delalloc_start;
	int ret = 0;

	WARN_ON(!PageLocked(page));
	set_page_extent_mapped(page);

	lock_extent(em_tree, page_start, page_end, GFP_NOFS);
	delalloc_start = page_start;
	existing_delalloc = count_range_bits(&BTRFS_I(inode)->extent_tree,
					     &delalloc_start, page_end,
					     PAGE_CACHE_SIZE, EXTENT_DELALLOC);
	set_extent_delalloc(&BTRFS_I(inode)->extent_tree, page_start,
			    page_end, GFP_NOFS);

	spin_lock(&root->fs_info->delalloc_lock);
	root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE - existing_delalloc;
	spin_unlock(&root->fs_info->delalloc_lock);

	if (zero_start != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
		flush_dcache_page(page);
		kunmap(page);
	}
	set_page_dirty(page);
	unlock_extent(em_tree, page_start, page_end, GFP_NOFS);

	return ret;
}

/*
 * taken from block_truncate_page, but does cow as it zeros out
 * any bytes left in the last page in the file.
 */
static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 blocksize = root->sectorsize;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	struct page *page;
	int ret = 0;
	u64 page_start;

	if ((offset & (blocksize - 1)) == 0)
		goto out;

	ret = -ENOMEM;
	page = grab_cache_page(mapping, index);
	if (!page)
		goto out;
	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out;
		}
	}
	page_start = (u64)page->index << PAGE_CACHE_SHIFT;

	ret = btrfs_cow_one_page(inode, page, offset);

	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}

static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
		struct btrfs_trans_handle *trans;
		struct btrfs_root *root = BTRFS_I(inode)->root;
		struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;

		u64 mask = root->sectorsize - 1;
		u64 pos = (inode->i_size + mask) & ~mask;
		u64 block_end = attr->ia_size | mask;
		u64 hole_size;
		u64 alloc_hint = 0;

		if (attr->ia_size <= pos)
			goto out;

		mutex_lock(&root->fs_info->fs_mutex);
		err = btrfs_check_free_space(root, 1, 0);
		mutex_unlock(&root->fs_info->fs_mutex);
		if (err)
			goto fail;

		btrfs_truncate_page(inode->i_mapping, inode->i_size);

		lock_extent(em_tree, pos, block_end, GFP_NOFS);
		hole_size = (attr->ia_size - pos + mask) & ~mask;

		mutex_lock(&root->fs_info->fs_mutex);
		trans = btrfs_start_transaction(root, 1);
		btrfs_set_trans_block_group(trans, inode);
		err = btrfs_drop_extents(trans, root, inode,
					 pos, pos + hole_size, pos,
					 &alloc_hint);

		if (alloc_hint != EXTENT_MAP_INLINE) {
			err = btrfs_insert_file_extent(trans, root,
						       inode->i_ino,
						       pos, 0, 0, hole_size);
		}
		btrfs_end_transaction(trans, root);
		mutex_unlock(&root->fs_info->fs_mutex);
		unlock_extent(em_tree, pos, block_end, GFP_NOFS);
		if (err)
			return err;
	}
out:
	err = inode_setattr(inode, attr);
fail:
	return err;
}
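
/*
 * Final iput of an unlinked inode: truncate away all of its items, drop
 * any xattrs and finally remove the inode item itself, all inside one
 * transaction.
 */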
void btrfs_delete_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr;
	int ret;

	truncate_inode_pages(&inode->i_data, 0);
	if (is_bad_inode(inode)) {
		goto no_delete;
	}

	inode->i_size = 0;
	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);

	btrfs_set_trans_block_group(trans, inode);
	ret = btrfs_truncate_in_trans(trans, root, inode);
	if (ret)
		goto no_delete_lock;
	ret = btrfs_delete_xattrs(trans, root, inode);
	if (ret)
		goto no_delete_lock;
	ret = btrfs_free_inode(trans, root, inode);
	if (ret)
		goto no_delete_lock;
	nr = trans->blocks_used;

	btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->fs_mutex);
	btrfs_btree_balance_dirty(root, nr);
	btrfs_throttle(root);
	return;

no_delete_lock:
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->fs_mutex);
	btrfs_btree_balance_dirty(root, nr);
	btrfs_throttle(root);
no_delete:
	clear_inode(inode);
}

/*
 * this returns the key found in the dir entry in the location pointer.
 * If no dir entries were found, location->objectid is 0.
 */
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
			       struct btrfs_key *location)
{
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int ret = 0;

	if (namelen == 1 && strcmp(name, ".") == 0) {
		location->objectid = dir->i_ino;
		location->type = BTRFS_INODE_ITEM_KEY;
		location->offset = 0;
		return 0;
	}
	path = btrfs_alloc_path();
	BUG_ON(!path);

	if (namelen == 2 && strcmp(name, "..") == 0) {
		struct btrfs_key key;
		struct extent_buffer *leaf;
		u32 nritems;
		int slot;

		key.objectid = dir->i_ino;
		btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
		key.offset = 0;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		BUG_ON(ret == 0);
		ret = 0;

		leaf = path->nodes[0];
		slot = path->slots[0];
		nritems = btrfs_header_nritems(leaf);
		if (slot >= nritems)
			goto out_err;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != dir->i_ino ||
		    key.type != BTRFS_INODE_REF_KEY) {
			goto out_err;
		}
		location->objectid = key.offset;
		location->type = BTRFS_INODE_ITEM_KEY;
		location->offset = 0;
		goto out;
	}

	di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
				   namelen, 0);
	if (IS_ERR(di))
		ret = PTR_ERR(di);
	if (!di || IS_ERR(di)) {
		goto out_err;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
out:
	btrfs_free_path(path);
	return ret;
out_err:
	location->objectid = 0;
	goto out;
}

/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root.
 * This is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_root *root,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root,
				    struct dentry *dentry)
{
	struct btrfs_path *path;
	struct btrfs_root_item *ri;

	if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
		return 0;
	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return 0;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	mutex_lock(&root->fs_info->fs_mutex);

	*sub_root = btrfs_read_fs_root(root->fs_info, location,
					dentry->d_name.name,
					dentry->d_name.len);
	if (IS_ERR(*sub_root))
		return PTR_ERR(*sub_root);

	ri = &(*sub_root)->root_item;
	location->objectid = btrfs_root_dirid(ri);
	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
	location->offset = 0;

	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->fs_mutex);
	return 0;
}

static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;
	inode->i_ino = args->ino;
	BTRFS_I(inode)->root = args->root;
	extent_map_tree_init(&BTRFS_I(inode)->extent_tree,
			     inode->i_mapping, GFP_NOFS);
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;
	return (args->ino == inode->i_ino &&
		args->root == BTRFS_I(inode)->root);
}

struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
			    u64 root_objectid)
{
	struct btrfs_iget_args args;
	args.ino = objectid;
	args.root = btrfs_lookup_fs_root(btrfs_sb(s)->fs_info, root_objectid);
	if (!args.root)
		return NULL;

	return ilookup5(s, objectid, btrfs_find_actor, (void *)&args);
}

struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
				struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	args.ino = objectid;
	args.root = root;

	inode = iget5_locked(s, objectid, btrfs_find_actor,
			     btrfs_init_locked_inode,
			     (void *)&args);
	return inode;
}
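
/*
 * Directory lookup: resolve the name to a key, cross into a subvolume
 * root when the entry points at one, and read the inode in if it is not
 * already cached.
 */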
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   struct nameidata *nd)
{
	struct inode * inode;
	struct btrfs_inode *bi = BTRFS_I(dir);
	struct btrfs_root *root = bi->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
	int ret;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	mutex_lock(&root->fs_info->fs_mutex);
	ret = btrfs_inode_by_name(dir, dentry, &location);
	mutex_unlock(&root->fs_info->fs_mutex);

	if (ret < 0)
		return ERR_PTR(ret);

	inode = NULL;
	if (location.objectid) {
		ret = fixup_tree_root_location(root, &location, &sub_root,
						dentry);
		if (ret < 0)
			return ERR_PTR(ret);
		if (ret > 0)
			return ERR_PTR(-ENOENT);
		inode = btrfs_iget_locked(dir->i_sb, location.objectid,
					  sub_root);
		if (!inode)
			return ERR_PTR(-EACCES);
		if (inode->i_state & I_NEW) {
			/* the inode and parent dir are two different roots */
			if (sub_root != root) {
				igrab(inode);
				sub_root->inode = inode;
			}
			BTRFS_I(inode)->root = sub_root;
			memcpy(&BTRFS_I(inode)->location, &location,
			       sizeof(location));
			btrfs_read_locked_inode(inode);
			unlock_new_inode(inode);
		}
	}
	return d_splice_alias(inode, dentry);
}

static unsigned char btrfs_filetype_table[] = {
	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
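
/*
 * readdir: "." and ".." are synthesized (".." from the inode backref);
 * everything else is walked straight out of the dir index keys (dir item
 * keys for the tree root) starting at f_pos.
 */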
static int btrfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int ret;
	u32 nritems;
	struct extent_buffer *leaf;
	int slot;
	int advance;
	unsigned char d_type;
	int over = 0;
	u32 di_cur;
	u32 di_total;
	u32 di_len;
	int key_type = BTRFS_DIR_INDEX_KEY;
	char tmp_name[32];
	char *name_ptr;
	int name_len;

	/* FIXME, use a real flag for deciding about the key type */
	if (root->fs_info->tree_root == root)
		key_type = BTRFS_DIR_ITEM_KEY;

	/* special case for "." */
	if (filp->f_pos == 0) {
		over = filldir(dirent, ".", 1,
			       1, inode->i_ino,
			       DT_DIR);
		if (over)
			return 0;
		filp->f_pos = 1;
	}

	mutex_lock(&root->fs_info->fs_mutex);
	key.objectid = inode->i_ino;
	path = btrfs_alloc_path();
	path->reada = 2;

	/* special case for .., just use the back ref */
	if (filp->f_pos == 1) {
		btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
		key.offset = 0;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		BUG_ON(ret == 0);
		leaf = path->nodes[0];
		slot = path->slots[0];
		nritems = btrfs_header_nritems(leaf);
		if (slot >= nritems) {
			btrfs_release_path(root, path);
			goto read_dir_items;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		btrfs_release_path(root, path);
		if (found_key.objectid != key.objectid ||
		    found_key.type != BTRFS_INODE_REF_KEY)
			goto read_dir_items;
		over = filldir(dirent, "..", 2,
			       2, found_key.offset, DT_DIR);
		if (over)
			goto nopos;
		filp->f_pos = 2;
	}

read_dir_items:
	btrfs_set_key_type(&key, key_type);
	key.offset = filp->f_pos;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;
	advance = 0;
	while(1) {
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		slot = path->slots[0];
		if (advance || slot >= nritems) {
			if (slot >= nritems -1) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
				leaf = path->nodes[0];
				nritems = btrfs_header_nritems(leaf);
				slot = path->slots[0];
			} else {
				slot++;
				path->slots[0]++;
			}
		}
		advance = 1;
		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;
		if (btrfs_key_type(&found_key) != key_type)
			break;
		if (found_key.offset < filp->f_pos)
			continue;

		filp->f_pos = found_key.offset;
		advance = 1;
		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
		di_cur = 0;
		di_total = btrfs_item_size(leaf, item);
		while(di_cur < di_total) {
			struct btrfs_key location;

			name_len = btrfs_dir_name_len(leaf, di);
			if (name_len < 32) {
				name_ptr = tmp_name;
			} else {
				name_ptr = kmalloc(name_len, GFP_NOFS);
				BUG_ON(!name_ptr);
			}
			read_extent_buffer(leaf, name_ptr,
					   (unsigned long)(di + 1), name_len);

			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
			btrfs_dir_item_key_to_cpu(leaf, di, &location);

			over = filldir(dirent, name_ptr, name_len,
				       found_key.offset,
				       location.objectid,
				       d_type);

			if (name_ptr != tmp_name)
				kfree(name_ptr);

			if (over)
				goto nopos;
			di_len = btrfs_dir_name_len(leaf, di) +
				btrfs_dir_data_len(leaf, di) +sizeof(*di);
			di_cur += di_len;
			di = (struct btrfs_dir_item *)((char *)di + di_len);
		}
	}
	filp->f_pos++;
nopos:
	ret = 0;
err:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret;
}

int btrfs_write_inode(struct inode *inode, int wait)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (wait) {
		mutex_lock(&root->fs_info->fs_mutex);
		trans = btrfs_start_transaction(root, 1);
		btrfs_set_trans_block_group(trans, inode);
		ret = btrfs_commit_transaction(trans, root);
		mutex_unlock(&root->fs_info->fs_mutex);
	}
	return ret;
}

/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
void btrfs_dirty_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;

	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, inode);
	btrfs_update_inode(trans, root, inode);
	btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->fs_mutex);
}
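
/*
 * Allocate a new in-core inode and insert the matching inode item into
 * the tree.  The block group argument is only a hint; it is re-evaluated
 * here with an owner flag that separates directories from everything
 * else.
 */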
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 objectid,
				     struct btrfs_block_group_cache *group,
				     int mode)
{
	struct inode *inode;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	int ret;
	int owner;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	inode = new_inode(root->fs_info->sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	extent_map_tree_init(&BTRFS_I(inode)->extent_tree,
			     inode->i_mapping, GFP_NOFS);
	BTRFS_I(inode)->root = root;

	if (mode & S_IFDIR)
		owner = 0;
	else
		owner = 1;
	group = btrfs_find_block_group(root, group, 0, 0, owner);
	BTRFS_I(inode)->block_group = group;

	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
		goto fail;

	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_mode = mode;
	inode->i_ino = objectid;
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	fill_inode_item(path->nodes[0], inode_item, inode);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);

	insert_inode_hash(inode);
	return inode;
fail:
	btrfs_free_path(path);
	return ERR_PTR(ret);
}

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}
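
/*
 * Link an inode into a directory: insert the dir item and the inode
 * backref, then bump the parent's size by twice the name length (one dir
 * item plus one dir index item) and update its timestamps.
 */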
  1315. static int btrfs_add_link(struct btrfs_trans_handle *trans,
  1316. struct dentry *dentry, struct inode *inode)
  1317. {
  1318. int ret;
  1319. struct btrfs_key key;
  1320. struct btrfs_root *root = BTRFS_I(dentry->d_parent->d_inode)->root;
  1321. struct inode *parent_inode;
  1322. key.objectid = inode->i_ino;
  1323. btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
  1324. key.offset = 0;
  1325. ret = btrfs_insert_dir_item(trans, root,
  1326. dentry->d_name.name, dentry->d_name.len,
  1327. dentry->d_parent->d_inode->i_ino,
  1328. &key, btrfs_inode_type(inode));
  1329. if (ret == 0) {
  1330. ret = btrfs_insert_inode_ref(trans, root,
  1331. dentry->d_name.name,
  1332. dentry->d_name.len,
  1333. inode->i_ino,
  1334. dentry->d_parent->d_inode->i_ino);
  1335. parent_inode = dentry->d_parent->d_inode;
  1336. parent_inode->i_size += dentry->d_name.len * 2;
  1337. parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
  1338. ret = btrfs_update_inode(trans, root,
  1339. dentry->d_parent->d_inode);
  1340. }
  1341. return ret;
  1342. }
  1343. static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
  1344. struct dentry *dentry, struct inode *inode)
  1345. {
  1346. int err = btrfs_add_link(trans, dentry, inode);
  1347. if (!err) {
  1348. d_instantiate(dentry, inode);
  1349. return 0;
  1350. }
  1351. if (err > 0)
  1352. err = -EEXIST;
  1353. return err;
  1354. }
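/*
 * mknod/create/symlink all follow the same pattern: reserve space, start
 * a transaction, allocate an objectid, create the inode, then link it
 * into the parent directory.
 */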
  1355. static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
  1356. int mode, dev_t rdev)
  1357. {
  1358. struct btrfs_trans_handle *trans;
  1359. struct btrfs_root *root = BTRFS_I(dir)->root;
  1360. struct inode *inode = NULL;
  1361. int err;
  1362. int drop_inode = 0;
  1363. u64 objectid;
  1364. unsigned long nr = 0;
  1365. if (!new_valid_dev(rdev))
  1366. return -EINVAL;
  1367. mutex_lock(&root->fs_info->fs_mutex);
  1368. err = btrfs_check_free_space(root, 1, 0);
  1369. if (err)
  1370. goto fail;
  1371. trans = btrfs_start_transaction(root, 1);
  1372. btrfs_set_trans_block_group(trans, dir);
  1373. err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
  1374. if (err) {
  1375. err = -ENOSPC;
  1376. goto out_unlock;
  1377. }
  1378. inode = btrfs_new_inode(trans, root, objectid,
  1379. BTRFS_I(dir)->block_group, mode);
  1380. err = PTR_ERR(inode);
  1381. if (IS_ERR(inode))
  1382. goto out_unlock;
  1383. btrfs_set_trans_block_group(trans, inode);
  1384. err = btrfs_add_nondir(trans, dentry, inode);
  1385. if (err)
  1386. drop_inode = 1;
  1387. else {
  1388. inode->i_op = &btrfs_special_inode_operations;
  1389. init_special_inode(inode, inode->i_mode, rdev);
  1390. btrfs_update_inode(trans, root, inode);
  1391. }
  1392. dir->i_sb->s_dirt = 1;
  1393. btrfs_update_inode_block_group(trans, inode);
  1394. btrfs_update_inode_block_group(trans, dir);
  1395. out_unlock:
  1396. nr = trans->blocks_used;
  1397. btrfs_end_transaction(trans, root);
  1398. fail:
  1399. mutex_unlock(&root->fs_info->fs_mutex);
  1400. if (drop_inode) {
  1401. inode_dec_link_count(inode);
  1402. iput(inode);
  1403. }
  1404. btrfs_btree_balance_dirty(root, nr);
  1405. btrfs_throttle(root);
  1406. return err;
  1407. }
  1408. static int btrfs_create(struct inode *dir, struct dentry *dentry,
  1409. int mode, struct nameidata *nd)
  1410. {
  1411. struct btrfs_trans_handle *trans;
  1412. struct btrfs_root *root = BTRFS_I(dir)->root;
  1413. struct inode *inode = NULL;
  1414. int err;
  1415. int drop_inode = 0;
  1416. unsigned long nr = 0;
  1417. u64 objectid;
  1418. mutex_lock(&root->fs_info->fs_mutex);
  1419. err = btrfs_check_free_space(root, 1, 0);
  1420. if (err)
  1421. goto fail;
  1422. trans = btrfs_start_transaction(root, 1);
  1423. btrfs_set_trans_block_group(trans, dir);
  1424. err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
  1425. if (err) {
  1426. err = -ENOSPC;
  1427. goto out_unlock;
  1428. }
  1429. inode = btrfs_new_inode(trans, root, objectid,
  1430. BTRFS_I(dir)->block_group, mode);
  1431. err = PTR_ERR(inode);
  1432. if (IS_ERR(inode))
  1433. goto out_unlock;
  1434. btrfs_set_trans_block_group(trans, inode);
  1435. err = btrfs_add_nondir(trans, dentry, inode);
  1436. if (err)
  1437. drop_inode = 1;
  1438. else {
  1439. inode->i_mapping->a_ops = &btrfs_aops;
  1440. inode->i_fop = &btrfs_file_operations;
  1441. inode->i_op = &btrfs_file_inode_operations;
  1442. extent_map_tree_init(&BTRFS_I(inode)->extent_tree,
  1443. inode->i_mapping, GFP_NOFS);
  1444. BTRFS_I(inode)->extent_tree.ops = &btrfs_extent_map_ops;
  1445. }
  1446. dir->i_sb->s_dirt = 1;
  1447. btrfs_update_inode_block_group(trans, inode);
  1448. btrfs_update_inode_block_group(trans, dir);
  1449. out_unlock:
  1450. nr = trans->blocks_used;
  1451. btrfs_end_transaction(trans, root);
  1452. fail:
  1453. mutex_unlock(&root->fs_info->fs_mutex);
  1454. if (drop_inode) {
  1455. inode_dec_link_count(inode);
  1456. iput(inode);
  1457. }
  1458. btrfs_btree_balance_dirty(root, nr);
  1459. btrfs_throttle(root);
  1460. return err;
  1461. }
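/*
 * Hard links: bump the link count and grab a reference on the existing
 * inode, then add the new directory entry pointing at it.
 */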
  1462. static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
  1463. struct dentry *dentry)
  1464. {
  1465. struct btrfs_trans_handle *trans;
  1466. struct btrfs_root *root = BTRFS_I(dir)->root;
  1467. struct inode *inode = old_dentry->d_inode;
  1468. unsigned long nr = 0;
  1469. int err;
  1470. int drop_inode = 0;
  1471. if (inode->i_nlink == 0)
  1472. return -ENOENT;
  1473. #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
  1474. inode->i_nlink++;
  1475. #else
  1476. inc_nlink(inode);
  1477. #endif
  1478. mutex_lock(&root->fs_info->fs_mutex);
  1479. err = btrfs_check_free_space(root, 1, 0);
  1480. if (err)
  1481. goto fail;
  1482. trans = btrfs_start_transaction(root, 1);
  1483. btrfs_set_trans_block_group(trans, dir);
  1484. atomic_inc(&inode->i_count);
  1485. err = btrfs_add_nondir(trans, dentry, inode);
  1486. if (err)
  1487. drop_inode = 1;
  1488. dir->i_sb->s_dirt = 1;
  1489. btrfs_update_inode_block_group(trans, dir);
  1490. err = btrfs_update_inode(trans, root, inode);
  1491. if (err)
  1492. drop_inode = 1;
  1493. nr = trans->blocks_used;
  1494. btrfs_end_transaction(trans, root);
  1495. fail:
  1496. mutex_unlock(&root->fs_info->fs_mutex);
  1497. if (drop_inode) {
  1498. inode_dec_link_count(inode);
  1499. iput(inode);
  1500. }
  1501. btrfs_btree_balance_dirty(root, nr);
  1502. btrfs_throttle(root);
  1503. return err;
  1504. }
  1505. static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
  1506. {
  1507. struct inode *inode;
  1508. struct btrfs_trans_handle *trans;
  1509. struct btrfs_root *root = BTRFS_I(dir)->root;
  1510. int err = 0;
  1511. int drop_on_err = 0;
  1512. u64 objectid;
  1513. unsigned long nr = 1;
  1514. mutex_lock(&root->fs_info->fs_mutex);
  1515. err = btrfs_check_free_space(root, 1, 0);
  1516. if (err)
  1517. goto out_unlock;
1518. trans = btrfs_start_transaction(root, 1);
1519. if (IS_ERR(trans)) {
1520. err = PTR_ERR(trans);
1521. goto out_unlock;
1522. }
1523. btrfs_set_trans_block_group(trans, dir);
  1524. err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
  1525. if (err) {
  1526. err = -ENOSPC;
  1527. goto out_unlock;
  1528. }
  1529. inode = btrfs_new_inode(trans, root, objectid,
  1530. BTRFS_I(dir)->block_group, S_IFDIR | mode);
  1531. if (IS_ERR(inode)) {
  1532. err = PTR_ERR(inode);
  1533. goto out_fail;
  1534. }
  1535. drop_on_err = 1;
  1536. inode->i_op = &btrfs_dir_inode_operations;
  1537. inode->i_fop = &btrfs_dir_file_operations;
  1538. btrfs_set_trans_block_group(trans, inode);
  1539. inode->i_size = 0;
  1540. err = btrfs_update_inode(trans, root, inode);
  1541. if (err)
  1542. goto out_fail;
  1543. err = btrfs_add_link(trans, dentry, inode);
  1544. if (err)
  1545. goto out_fail;
  1546. d_instantiate(dentry, inode);
  1547. drop_on_err = 0;
  1548. dir->i_sb->s_dirt = 1;
  1549. btrfs_update_inode_block_group(trans, inode);
  1550. btrfs_update_inode_block_group(trans, dir);
  1551. out_fail:
  1552. nr = trans->blocks_used;
  1553. btrfs_end_transaction(trans, root);
  1554. out_unlock:
  1555. mutex_unlock(&root->fs_info->fs_mutex);
  1556. if (drop_on_err)
  1557. iput(inode);
  1558. btrfs_btree_balance_dirty(root, nr);
  1559. btrfs_throttle(root);
  1560. return err;
  1561. }
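/*
 * Map a file byte range to an extent_map.  The file extent item is looked
 * up in the tree and regular extents, inline extents and holes are each
 * handled; inline data is copied between the leaf and the supplied page,
 * and a transaction is started when an uptodate page must be written back
 * into an inline extent.
 */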
  1562. struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
  1563. size_t page_offset, u64 start, u64 end,
  1564. int create)
  1565. {
  1566. int ret;
  1567. int err = 0;
  1568. u64 bytenr;
  1569. u64 extent_start = 0;
  1570. u64 extent_end = 0;
  1571. u64 objectid = inode->i_ino;
  1572. u32 found_type;
  1573. int failed_insert = 0;
  1574. struct btrfs_path *path;
  1575. struct btrfs_root *root = BTRFS_I(inode)->root;
  1576. struct btrfs_file_extent_item *item;
  1577. struct extent_buffer *leaf;
  1578. struct btrfs_key found_key;
  1579. struct extent_map *em = NULL;
  1580. struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
  1581. struct btrfs_trans_handle *trans = NULL;
  1582. path = btrfs_alloc_path();
  1583. BUG_ON(!path);
  1584. mutex_lock(&root->fs_info->fs_mutex);
  1585. again:
  1586. em = lookup_extent_mapping(em_tree, start, end);
  1587. if (em) {
  1588. if (em->start > start) {
  1589. printk("get_extent start %Lu em start %Lu\n",
  1590. start, em->start);
  1591. WARN_ON(1);
  1592. }
  1593. goto out;
  1594. }
  1595. if (!em) {
  1596. em = alloc_extent_map(GFP_NOFS);
  1597. if (!em) {
  1598. err = -ENOMEM;
  1599. goto out;
  1600. }
  1601. em->start = EXTENT_MAP_HOLE;
  1602. em->end = EXTENT_MAP_HOLE;
  1603. }
  1604. em->bdev = inode->i_sb->s_bdev;
  1605. ret = btrfs_lookup_file_extent(trans, root, path,
  1606. objectid, start, trans != NULL);
  1607. if (ret < 0) {
  1608. err = ret;
  1609. goto out;
  1610. }
  1611. if (ret != 0) {
  1612. if (path->slots[0] == 0)
  1613. goto not_found;
  1614. path->slots[0]--;
  1615. }
  1616. leaf = path->nodes[0];
  1617. item = btrfs_item_ptr(leaf, path->slots[0],
  1618. struct btrfs_file_extent_item);
  1619. /* are we inside the extent that was found? */
  1620. btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
  1621. found_type = btrfs_key_type(&found_key);
  1622. if (found_key.objectid != objectid ||
  1623. found_type != BTRFS_EXTENT_DATA_KEY) {
  1624. goto not_found;
  1625. }
  1626. found_type = btrfs_file_extent_type(leaf, item);
  1627. extent_start = found_key.offset;
  1628. if (found_type == BTRFS_FILE_EXTENT_REG) {
  1629. extent_end = extent_start +
  1630. btrfs_file_extent_num_bytes(leaf, item);
  1631. err = 0;
  1632. if (start < extent_start || start >= extent_end) {
  1633. em->start = start;
  1634. if (start < extent_start) {
  1635. if (end < extent_start)
  1636. goto not_found;
  1637. em->end = extent_end - 1;
  1638. } else {
  1639. em->end = end;
  1640. }
  1641. goto not_found_em;
  1642. }
  1643. bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
  1644. if (bytenr == 0) {
  1645. em->start = extent_start;
  1646. em->end = extent_end - 1;
  1647. em->block_start = EXTENT_MAP_HOLE;
  1648. em->block_end = EXTENT_MAP_HOLE;
  1649. goto insert;
  1650. }
  1651. bytenr += btrfs_file_extent_offset(leaf, item);
  1652. em->block_start = bytenr;
  1653. em->block_end = em->block_start +
  1654. btrfs_file_extent_num_bytes(leaf, item) - 1;
  1655. em->start = extent_start;
  1656. em->end = extent_end - 1;
  1657. goto insert;
  1658. } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
  1659. unsigned long ptr;
  1660. char *map;
  1661. size_t size;
  1662. size_t extent_offset;
  1663. size_t copy_size;
  1664. size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf,
  1665. path->slots[0]));
  1666. extent_end = (extent_start + size - 1) |
  1667. ((u64)root->sectorsize - 1);
  1668. if (start < extent_start || start >= extent_end) {
  1669. em->start = start;
  1670. if (start < extent_start) {
  1671. if (end < extent_start)
  1672. goto not_found;
  1673. em->end = extent_end;
  1674. } else {
  1675. em->end = end;
  1676. }
  1677. goto not_found_em;
  1678. }
  1679. em->block_start = EXTENT_MAP_INLINE;
  1680. em->block_end = EXTENT_MAP_INLINE;
  1681. if (!page) {
  1682. em->start = extent_start;
  1683. em->end = extent_start + size - 1;
  1684. goto out;
  1685. }
  1686. extent_offset = ((u64)page->index << PAGE_CACHE_SHIFT) -
  1687. extent_start + page_offset;
  1688. copy_size = min_t(u64, PAGE_CACHE_SIZE - page_offset,
  1689. size - extent_offset);
  1690. em->start = extent_start + extent_offset;
1691. em->end = (em->start + copy_size - 1) |
1692. ((u64)root->sectorsize - 1);
  1693. map = kmap(page);
  1694. ptr = btrfs_file_extent_inline_start(item) + extent_offset;
  1695. if (create == 0 && !PageUptodate(page)) {
  1696. read_extent_buffer(leaf, map + page_offset, ptr,
  1697. copy_size);
  1698. flush_dcache_page(page);
  1699. } else if (create && PageUptodate(page)) {
  1700. if (!trans) {
  1701. kunmap(page);
  1702. free_extent_map(em);
  1703. em = NULL;
  1704. btrfs_release_path(root, path);
  1705. trans = btrfs_start_transaction(root, 1);
  1706. goto again;
  1707. }
  1708. write_extent_buffer(leaf, map + page_offset, ptr,
  1709. copy_size);
  1710. btrfs_mark_buffer_dirty(leaf);
  1711. }
  1712. kunmap(page);
  1713. set_extent_uptodate(em_tree, em->start, em->end, GFP_NOFS);
  1714. goto insert;
1715. } else {
1716. printk("unknown found_type %d\n", found_type);
1717. WARN_ON(1);
1718. }
  1719. not_found:
  1720. em->start = start;
  1721. em->end = end;
  1722. not_found_em:
  1723. em->block_start = EXTENT_MAP_HOLE;
  1724. em->block_end = EXTENT_MAP_HOLE;
  1725. insert:
  1726. btrfs_release_path(root, path);
  1727. if (em->start > start || em->end < start) {
  1728. printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->end, start, end);
  1729. err = -EIO;
  1730. goto out;
  1731. }
  1732. ret = add_extent_mapping(em_tree, em);
  1733. if (ret == -EEXIST) {
  1734. free_extent_map(em);
  1735. em = NULL;
  1736. if (0 && failed_insert == 1) {
  1737. btrfs_drop_extent_cache(inode, start, end);
  1738. }
  1739. failed_insert++;
  1740. if (failed_insert > 5) {
  1741. printk("failing to insert %Lu %Lu\n", start, end);
  1742. err = -EIO;
  1743. goto out;
  1744. }
  1745. goto again;
  1746. }
  1747. err = 0;
  1748. out:
  1749. btrfs_free_path(path);
  1750. if (trans) {
  1751. ret = btrfs_end_transaction(trans, root);
  1752. if (!err)
  1753. err = ret;
  1754. }
  1755. mutex_unlock(&root->fs_info->fs_mutex);
  1756. if (err) {
  1757. free_extent_map(em);
  1758. WARN_ON(1);
  1759. return ERR_PTR(err);
  1760. }
  1761. return em;
  1762. }
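/*
 * The address_space operations below are thin wrappers that feed
 * btrfs_get_extent into the generic extent_map code.
 */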
  1763. static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
  1764. {
  1765. return extent_bmap(mapping, iblock, btrfs_get_extent);
  1766. }
  1767. int btrfs_readpage(struct file *file, struct page *page)
  1768. {
  1769. struct extent_map_tree *tree;
  1770. tree = &BTRFS_I(page->mapping->host)->extent_tree;
  1771. return extent_read_full_page(tree, page, btrfs_get_extent);
  1772. }
  1773. static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
  1774. {
  1775. struct extent_map_tree *tree;
  1776. if (current->flags & PF_MEMALLOC) {
  1777. redirty_page_for_writepage(wbc, page);
  1778. unlock_page(page);
  1779. return 0;
  1780. }
  1781. tree = &BTRFS_I(page->mapping->host)->extent_tree;
  1782. return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
  1783. }
  1784. static int btrfs_writepages(struct address_space *mapping,
  1785. struct writeback_control *wbc)
  1786. {
  1787. struct extent_map_tree *tree;
  1788. tree = &BTRFS_I(mapping->host)->extent_tree;
  1789. return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
  1790. }
  1791. static int
  1792. btrfs_readpages(struct file *file, struct address_space *mapping,
  1793. struct list_head *pages, unsigned nr_pages)
  1794. {
  1795. struct extent_map_tree *tree;
  1796. tree = &BTRFS_I(mapping->host)->extent_tree;
  1797. return extent_readpages(tree, mapping, pages, nr_pages,
  1798. btrfs_get_extent);
  1799. }
  1800. static int btrfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
  1801. {
  1802. struct extent_map_tree *tree;
  1803. int ret;
  1804. tree = &BTRFS_I(page->mapping->host)->extent_tree;
  1805. ret = try_release_extent_mapping(tree, page);
  1806. if (ret == 1) {
  1807. ClearPagePrivate(page);
  1808. set_page_private(page, 0);
  1809. page_cache_release(page);
  1810. }
  1811. return ret;
  1812. }
  1813. static void btrfs_invalidatepage(struct page *page, unsigned long offset)
  1814. {
  1815. struct extent_map_tree *tree;
  1816. tree = &BTRFS_I(page->mapping->host)->extent_tree;
  1817. extent_invalidatepage(tree, page, offset);
  1818. btrfs_releasepage(page, GFP_NOFS);
  1819. }
  1820. /*
  1821. * btrfs_page_mkwrite() is not allowed to change the file size as it gets
  1822. * called from a page fault handler when a page is first dirtied. Hence we must
  1823. * be careful to check for EOF conditions here. We set the page up correctly
  1824. * for a written page which means we get ENOSPC checking when writing into
  1825. * holes and correct delalloc and unwritten extent mapping on filesystems that
  1826. * support these features.
  1827. *
  1828. * We are not allowed to take the i_mutex here so we have to play games to
  1829. * protect against truncate races as the page could now be beyond EOF. Because
  1830. * vmtruncate() writes the inode size before removing pages, once we have the
  1831. * page lock we can determine safely if the page is beyond EOF. If it is not
  1832. * beyond EOF, then the page is guaranteed safe against truncation until we
  1833. * unlock the page.
  1834. */
  1835. int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
  1836. {
  1837. struct inode *inode = fdentry(vma->vm_file)->d_inode;
  1838. struct btrfs_root *root = BTRFS_I(inode)->root;
  1839. unsigned long end;
  1840. loff_t size;
  1841. int ret;
  1842. u64 page_start;
  1843. mutex_lock(&root->fs_info->fs_mutex);
  1844. ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
  1845. mutex_unlock(&root->fs_info->fs_mutex);
  1846. if (ret)
  1847. goto out;
  1848. ret = -EINVAL;
  1849. lock_page(page);
  1850. wait_on_page_writeback(page);
  1851. size = i_size_read(inode);
  1852. page_start = (u64)page->index << PAGE_CACHE_SHIFT;
  1853. if ((page->mapping != inode->i_mapping) ||
  1854. (page_start > size)) {
  1855. /* page got truncated out from underneath us */
  1856. goto out_unlock;
  1857. }
  1858. /* page is wholly or partially inside EOF */
  1859. if (page_start + PAGE_CACHE_SIZE > size)
  1860. end = size & ~PAGE_CACHE_MASK;
  1861. else
  1862. end = PAGE_CACHE_SIZE;
  1863. ret = btrfs_cow_one_page(inode, page, end);
  1864. out_unlock:
  1865. unlock_page(page);
  1866. out:
  1867. return ret;
  1868. }
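/*
 * Zero the partial page at the new EOF, then drop the items beyond
 * i_size inside a transaction.
 */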
  1869. static void btrfs_truncate(struct inode *inode)
  1870. {
  1871. struct btrfs_root *root = BTRFS_I(inode)->root;
  1872. int ret;
  1873. struct btrfs_trans_handle *trans;
  1874. unsigned long nr;
  1875. if (!S_ISREG(inode->i_mode))
  1876. return;
  1877. if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
  1878. return;
  1879. btrfs_truncate_page(inode->i_mapping, inode->i_size);
  1880. mutex_lock(&root->fs_info->fs_mutex);
  1881. trans = btrfs_start_transaction(root, 1);
  1882. btrfs_set_trans_block_group(trans, inode);
  1883. /* FIXME, add redo link to tree so we don't leak on crash */
  1884. ret = btrfs_truncate_in_trans(trans, root, inode);
  1885. btrfs_update_inode(trans, root, inode);
  1886. nr = trans->blocks_used;
  1887. ret = btrfs_end_transaction(trans, root);
  1888. BUG_ON(ret);
  1889. mutex_unlock(&root->fs_info->fs_mutex);
  1890. btrfs_btree_balance_dirty(root, nr);
  1891. btrfs_throttle(root);
  1892. }
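/*
 * Create a new subvolume: allocate an empty leaf for the new root, insert
 * a root item for it in the tree root, add a directory entry and inode
 * ref under the filesystem's top-level directory, and finally create the
 * new root's first (empty) directory inode.
 */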
  1893. static int noinline create_subvol(struct btrfs_root *root, char *name,
  1894. int namelen)
  1895. {
  1896. struct btrfs_trans_handle *trans;
  1897. struct btrfs_key key;
  1898. struct btrfs_root_item root_item;
  1899. struct btrfs_inode_item *inode_item;
  1900. struct extent_buffer *leaf;
  1901. struct btrfs_root *new_root = root;
  1902. struct inode *inode;
  1903. struct inode *dir;
  1904. int ret;
  1905. int err;
  1906. u64 objectid;
  1907. u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
  1908. unsigned long nr = 1;
  1909. mutex_lock(&root->fs_info->fs_mutex);
  1910. ret = btrfs_check_free_space(root, 1, 0);
  1911. if (ret)
  1912. goto fail_commit;
  1913. trans = btrfs_start_transaction(root, 1);
  1914. BUG_ON(!trans);
  1915. ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
  1916. 0, &objectid);
  1917. if (ret)
  1918. goto fail;
  1919. leaf = __btrfs_alloc_free_block(trans, root, root->leafsize,
  1920. objectid, trans->transid, 0, 0,
  1921. 0, 0);
if (IS_ERR(leaf)) {
ret = PTR_ERR(leaf);
goto fail;
}
  1924. btrfs_set_header_nritems(leaf, 0);
  1925. btrfs_set_header_level(leaf, 0);
  1926. btrfs_set_header_bytenr(leaf, leaf->start);
  1927. btrfs_set_header_generation(leaf, trans->transid);
  1928. btrfs_set_header_owner(leaf, objectid);
  1929. write_extent_buffer(leaf, root->fs_info->fsid,
  1930. (unsigned long)btrfs_header_fsid(leaf),
  1931. BTRFS_FSID_SIZE);
  1932. btrfs_mark_buffer_dirty(leaf);
  1933. inode_item = &root_item.inode;
  1934. memset(inode_item, 0, sizeof(*inode_item));
  1935. inode_item->generation = cpu_to_le64(1);
  1936. inode_item->size = cpu_to_le64(3);
  1937. inode_item->nlink = cpu_to_le32(1);
  1938. inode_item->nblocks = cpu_to_le64(1);
  1939. inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
  1940. btrfs_set_root_bytenr(&root_item, leaf->start);
  1941. btrfs_set_root_level(&root_item, 0);
  1942. btrfs_set_root_refs(&root_item, 1);
  1943. btrfs_set_root_used(&root_item, 0);
  1944. memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
  1945. root_item.drop_level = 0;
  1946. free_extent_buffer(leaf);
  1947. leaf = NULL;
  1948. btrfs_set_root_dirid(&root_item, new_dirid);
  1949. key.objectid = objectid;
  1950. key.offset = 1;
  1951. btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
  1952. ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
  1953. &root_item);
  1954. if (ret)
  1955. goto fail;
  1956. /*
  1957. * insert the directory item
  1958. */
  1959. key.offset = (u64)-1;
  1960. dir = root->fs_info->sb->s_root->d_inode;
  1961. ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
  1962. name, namelen, dir->i_ino, &key,
  1963. BTRFS_FT_DIR);
  1964. if (ret)
  1965. goto fail;
  1966. ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
  1967. name, namelen, objectid,
  1968. root->fs_info->sb->s_root->d_inode->i_ino);
  1969. if (ret)
  1970. goto fail;
  1971. ret = btrfs_commit_transaction(trans, root);
  1972. if (ret)
  1973. goto fail_commit;
  1974. new_root = btrfs_read_fs_root(root->fs_info, &key, name, namelen);
  1975. BUG_ON(!new_root);
  1976. trans = btrfs_start_transaction(new_root, 1);
  1977. BUG_ON(!trans);
  1978. inode = btrfs_new_inode(trans, new_root, new_dirid,
  1979. BTRFS_I(dir)->block_group, S_IFDIR | 0700);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto fail;
}
  1982. inode->i_op = &btrfs_dir_inode_operations;
  1983. inode->i_fop = &btrfs_dir_file_operations;
  1984. new_root->inode = inode;
  1985. ret = btrfs_insert_inode_ref(trans, new_root, "..", 2, new_dirid,
  1986. new_dirid);
  1987. inode->i_nlink = 1;
  1988. inode->i_size = 0;
  1989. ret = btrfs_update_inode(trans, new_root, inode);
  1990. if (ret)
  1991. goto fail;
  1992. fail:
  1993. nr = trans->blocks_used;
  1994. err = btrfs_commit_transaction(trans, new_root);
  1995. if (err && !ret)
  1996. ret = err;
  1997. fail_commit:
  1998. mutex_unlock(&root->fs_info->fs_mutex);
  1999. btrfs_btree_balance_dirty(root, nr);
  2000. btrfs_throttle(root);
  2001. return ret;
  2002. }
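/*
 * Snapshot creation just queues a btrfs_pending_snapshot on the running
 * transaction; the actual snapshot is taken when that transaction commits.
 */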
  2003. static int create_snapshot(struct btrfs_root *root, char *name, int namelen)
  2004. {
  2005. struct btrfs_pending_snapshot *pending_snapshot;
  2006. struct btrfs_trans_handle *trans;
  2007. int ret;
  2008. int err;
  2009. unsigned long nr = 0;
  2010. if (!root->ref_cows)
  2011. return -EINVAL;
  2012. mutex_lock(&root->fs_info->fs_mutex);
  2013. ret = btrfs_check_free_space(root, 1, 0);
  2014. if (ret)
  2015. goto fail_unlock;
  2016. pending_snapshot = kmalloc(sizeof(*pending_snapshot), GFP_NOFS);
  2017. if (!pending_snapshot) {
  2018. ret = -ENOMEM;
  2019. goto fail_unlock;
  2020. }
  2021. pending_snapshot->name = kstrndup(name, namelen, GFP_NOFS);
  2022. if (!pending_snapshot->name) {
  2023. ret = -ENOMEM;
  2024. kfree(pending_snapshot);
  2025. goto fail_unlock;
  2026. }
  2027. trans = btrfs_start_transaction(root, 1);
  2028. BUG_ON(!trans);
  2029. pending_snapshot->root = root;
  2030. list_add(&pending_snapshot->list,
  2031. &trans->transaction->pending_snapshots);
  2032. ret = btrfs_update_inode(trans, root, root->inode);
  2033. err = btrfs_commit_transaction(trans, root);
  2034. fail_unlock:
  2035. mutex_unlock(&root->fs_info->fs_mutex);
  2036. btrfs_btree_balance_dirty(root, nr);
  2037. btrfs_throttle(root);
  2038. return ret;
  2039. }
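/*
 * Kick off readahead for a range of pages, papering over the readahead
 * API change in 2.6.23.
 */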
  2040. unsigned long btrfs_force_ra(struct address_space *mapping,
  2041. struct file_ra_state *ra, struct file *file,
  2042. pgoff_t offset, pgoff_t last_index)
  2043. {
  2044. pgoff_t req_size;
  2045. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
  2046. req_size = last_index - offset + 1;
  2047. offset = page_cache_readahead(mapping, ra, file, offset, req_size);
  2048. return offset;
  2049. #else
  2050. req_size = min(last_index - offset + 1, (pgoff_t)128);
  2051. page_cache_sync_readahead(mapping, ra, file, offset, req_size);
  2052. return offset + req_size;
  2053. #endif
  2054. }
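/*
 * Defrag by reading every page in the file and marking it delalloc and
 * dirty, so that writeback allocates new (ideally contiguous) extents.
 */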
  2055. int btrfs_defrag_file(struct file *file) {
  2056. struct inode *inode = fdentry(file)->d_inode;
  2057. struct btrfs_root *root = BTRFS_I(inode)->root;
  2058. struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
  2059. struct page *page;
  2060. unsigned long last_index;
  2061. unsigned long ra_index = 0;
  2062. u64 page_start;
  2063. u64 page_end;
  2064. u64 delalloc_start;
  2065. u64 existing_delalloc;
  2066. unsigned long i;
  2067. int ret;
  2068. mutex_lock(&root->fs_info->fs_mutex);
  2069. ret = btrfs_check_free_space(root, inode->i_size, 0);
  2070. mutex_unlock(&root->fs_info->fs_mutex);
  2071. if (ret)
  2072. return -ENOSPC;
  2073. mutex_lock(&inode->i_mutex);
  2074. last_index = inode->i_size >> PAGE_CACHE_SHIFT;
  2075. for (i = 0; i <= last_index; i++) {
  2076. if (i == ra_index) {
  2077. ra_index = btrfs_force_ra(inode->i_mapping,
  2078. &file->f_ra,
  2079. file, ra_index, last_index);
  2080. }
  2081. page = grab_cache_page(inode->i_mapping, i);
  2082. if (!page)
  2083. goto out_unlock;
  2084. if (!PageUptodate(page)) {
  2085. btrfs_readpage(NULL, page);
  2086. lock_page(page);
  2087. if (!PageUptodate(page)) {
  2088. unlock_page(page);
  2089. page_cache_release(page);
  2090. goto out_unlock;
  2091. }
  2092. }
  2093. page_start = (u64)page->index << PAGE_CACHE_SHIFT;
  2094. page_end = page_start + PAGE_CACHE_SIZE - 1;
  2095. lock_extent(em_tree, page_start, page_end, GFP_NOFS);
  2096. delalloc_start = page_start;
  2097. existing_delalloc =
  2098. count_range_bits(&BTRFS_I(inode)->extent_tree,
  2099. &delalloc_start, page_end,
  2100. PAGE_CACHE_SIZE, EXTENT_DELALLOC);
  2101. set_extent_delalloc(em_tree, page_start,
  2102. page_end, GFP_NOFS);
  2103. spin_lock(&root->fs_info->delalloc_lock);
  2104. root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE -
  2105. existing_delalloc;
  2106. spin_unlock(&root->fs_info->delalloc_lock);
  2107. unlock_extent(em_tree, page_start, page_end, GFP_NOFS);
  2108. set_page_dirty(page);
  2109. unlock_page(page);
  2110. page_cache_release(page);
  2111. balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
  2112. }
  2113. out_unlock:
  2114. mutex_unlock(&inode->i_mutex);
  2115. return 0;
  2116. }
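/*
 * Resize ioctl: the size comes in as a string ("max", or a value with an
 * optional +/- prefix), is rounded down to a sector boundary, and the
 * extent tree is then grown or shrunk to match.
 */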
  2117. static int btrfs_ioctl_resize(struct btrfs_root *root, void __user *arg)
  2118. {
  2119. u64 new_size;
  2120. u64 old_size;
  2121. struct btrfs_ioctl_vol_args *vol_args;
  2122. struct btrfs_trans_handle *trans;
  2123. char *sizestr;
  2124. int ret = 0;
  2125. int namelen;
  2126. int mod = 0;
  2127. vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
  2128. if (!vol_args)
  2129. return -ENOMEM;
  2130. if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
  2131. ret = -EFAULT;
  2132. goto out;
  2133. }
  2134. namelen = strlen(vol_args->name);
  2135. if (namelen > BTRFS_VOL_NAME_MAX) {
  2136. ret = -EINVAL;
  2137. goto out;
  2138. }
  2139. sizestr = vol_args->name;
  2140. if (!strcmp(sizestr, "max"))
  2141. new_size = root->fs_info->sb->s_bdev->bd_inode->i_size;
  2142. else {
  2143. if (sizestr[0] == '-') {
  2144. mod = -1;
  2145. sizestr++;
  2146. } else if (sizestr[0] == '+') {
  2147. mod = 1;
  2148. sizestr++;
  2149. }
  2150. new_size = btrfs_parse_size(sizestr);
  2151. if (new_size == 0) {
  2152. ret = -EINVAL;
  2153. goto out;
  2154. }
  2155. }
  2156. mutex_lock(&root->fs_info->fs_mutex);
  2157. old_size = btrfs_super_total_bytes(&root->fs_info->super_copy);
  2158. if (mod < 0) {
  2159. if (new_size > old_size) {
  2160. ret = -EINVAL;
  2161. goto out_unlock;
  2162. }
  2163. new_size = old_size - new_size;
  2164. } else if (mod > 0) {
  2165. new_size = old_size + new_size;
  2166. }
  2167. if (new_size < 256 * 1024 * 1024) {
  2168. ret = -EINVAL;
  2169. goto out_unlock;
  2170. }
  2171. if (new_size > root->fs_info->sb->s_bdev->bd_inode->i_size) {
  2172. ret = -EFBIG;
  2173. goto out_unlock;
  2174. }
  2175. do_div(new_size, root->sectorsize);
  2176. new_size *= root->sectorsize;
  2177. printk("new size is %Lu\n", new_size);
  2178. if (new_size > old_size) {
  2179. trans = btrfs_start_transaction(root, 1);
  2180. ret = btrfs_grow_extent_tree(trans, root, new_size);
  2181. btrfs_commit_transaction(trans, root);
  2182. } else {
  2183. ret = btrfs_shrink_extent_tree(root, new_size);
  2184. }
  2185. out_unlock:
  2186. mutex_unlock(&root->fs_info->fs_mutex);
  2187. out:
  2188. kfree(vol_args);
  2189. return ret;
  2190. }
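/*
 * Snapshot/subvolume creation ioctl: validate the name, make sure it does
 * not already exist in the top-level directory, then create a subvolume
 * when the ioctl is issued on the tree root, or a snapshot of the current
 * root otherwise.
 */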
  2191. static int noinline btrfs_ioctl_snap_create(struct btrfs_root *root,
  2192. void __user *arg)
  2193. {
  2194. struct btrfs_ioctl_vol_args *vol_args;
  2195. struct btrfs_dir_item *di;
  2196. struct btrfs_path *path;
  2197. u64 root_dirid;
  2198. int namelen;
  2199. int ret;
  2200. vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
  2201. if (!vol_args)
  2202. return -ENOMEM;
  2203. if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
  2204. ret = -EFAULT;
  2205. goto out;
  2206. }
  2207. namelen = strlen(vol_args->name);
  2208. if (namelen > BTRFS_VOL_NAME_MAX) {
  2209. ret = -EINVAL;
  2210. goto out;
  2211. }
  2212. if (strchr(vol_args->name, '/')) {
  2213. ret = -EINVAL;
  2214. goto out;
  2215. }
  2216. path = btrfs_alloc_path();
  2217. if (!path) {
  2218. ret = -ENOMEM;
  2219. goto out;
  2220. }
2221. root_dirid = root->fs_info->sb->s_root->d_inode->i_ino;
  2222. mutex_lock(&root->fs_info->fs_mutex);
  2223. di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root,
  2224. path, root_dirid,
  2225. vol_args->name, namelen, 0);
  2226. mutex_unlock(&root->fs_info->fs_mutex);
  2227. btrfs_free_path(path);
  2228. if (di && !IS_ERR(di)) {
  2229. ret = -EEXIST;
  2230. goto out;
  2231. }
  2232. if (IS_ERR(di)) {
  2233. ret = PTR_ERR(di);
  2234. goto out;
  2235. }
  2236. if (root == root->fs_info->tree_root)
  2237. ret = create_subvol(root, vol_args->name, namelen);
  2238. else
  2239. ret = create_snapshot(root, vol_args->name, namelen);
  2240. out:
  2241. kfree(vol_args);
  2242. return ret;
  2243. }
  2244. static int btrfs_ioctl_defrag(struct file *file)
  2245. {
  2246. struct inode *inode = fdentry(file)->d_inode;
  2247. struct btrfs_root *root = BTRFS_I(inode)->root;
  2248. switch (inode->i_mode & S_IFMT) {
  2249. case S_IFDIR:
  2250. mutex_lock(&root->fs_info->fs_mutex);
  2251. btrfs_defrag_root(root, 0);
  2252. btrfs_defrag_root(root->fs_info->extent_root, 0);
  2253. mutex_unlock(&root->fs_info->fs_mutex);
  2254. break;
  2255. case S_IFREG:
  2256. btrfs_defrag_file(file);
  2257. break;
  2258. }
  2259. return 0;
  2260. }
2261. long btrfs_ioctl(struct file *file, unsigned int cmd,
2262. unsigned long arg)
  2263. {
  2264. struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
  2265. switch (cmd) {
  2266. case BTRFS_IOC_SNAP_CREATE:
  2267. return btrfs_ioctl_snap_create(root, (void __user *)arg);
  2268. case BTRFS_IOC_DEFRAG:
  2269. return btrfs_ioctl_defrag(file);
  2270. case BTRFS_IOC_RESIZE:
  2271. return btrfs_ioctl_resize(root, (void __user *)arg);
  2272. }
  2273. return -ENOTTY;
  2274. }
  2275. /*
  2276. * Called inside transaction, so use GFP_NOFS
  2277. */
  2278. struct inode *btrfs_alloc_inode(struct super_block *sb)
  2279. {
  2280. struct btrfs_inode *ei;
  2281. ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
  2282. if (!ei)
  2283. return NULL;
  2284. ei->last_trans = 0;
  2285. ei->ordered_trans = 0;
  2286. return &ei->vfs_inode;
  2287. }
  2288. void btrfs_destroy_inode(struct inode *inode)
  2289. {
  2290. WARN_ON(!list_empty(&inode->i_dentry));
  2291. WARN_ON(inode->i_data.nrpages);
  2292. kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
  2293. }
  2294. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
  2295. static void init_once(struct kmem_cache * cachep, void *foo)
  2296. #else
  2297. static void init_once(void * foo, struct kmem_cache * cachep,
  2298. unsigned long flags)
  2299. #endif
  2300. {
  2301. struct btrfs_inode *ei = (struct btrfs_inode *) foo;
  2302. inode_init_once(&ei->vfs_inode);
  2303. }
  2304. void btrfs_destroy_cachep(void)
  2305. {
  2306. if (btrfs_inode_cachep)
  2307. kmem_cache_destroy(btrfs_inode_cachep);
  2308. if (btrfs_trans_handle_cachep)
  2309. kmem_cache_destroy(btrfs_trans_handle_cachep);
  2310. if (btrfs_transaction_cachep)
  2311. kmem_cache_destroy(btrfs_transaction_cachep);
  2312. if (btrfs_bit_radix_cachep)
  2313. kmem_cache_destroy(btrfs_bit_radix_cachep);
  2314. if (btrfs_path_cachep)
  2315. kmem_cache_destroy(btrfs_path_cachep);
  2316. }
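/*
 * Thin wrapper around kmem_cache_create() that hides the constructor
 * prototype differences between kernel versions.
 */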
  2317. struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
  2318. unsigned long extra_flags,
  2319. #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
  2320. void (*ctor)(struct kmem_cache *, void *)
  2321. #else
  2322. void (*ctor)(void *, struct kmem_cache *,
  2323. unsigned long)
  2324. #endif
  2325. )
  2326. {
  2327. return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
  2328. SLAB_MEM_SPREAD | extra_flags), ctor
  2329. #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
  2330. ,NULL
  2331. #endif
  2332. );
  2333. }
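/*
 * Create the slab caches used for inodes, transaction handles,
 * transactions, paths and the bit radix.
 */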
  2334. int btrfs_init_cachep(void)
  2335. {
  2336. btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
  2337. sizeof(struct btrfs_inode),
  2338. 0, init_once);
  2339. if (!btrfs_inode_cachep)
  2340. goto fail;
  2341. btrfs_trans_handle_cachep =
  2342. btrfs_cache_create("btrfs_trans_handle_cache",
  2343. sizeof(struct btrfs_trans_handle),
  2344. 0, NULL);
  2345. if (!btrfs_trans_handle_cachep)
  2346. goto fail;
  2347. btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
  2348. sizeof(struct btrfs_transaction),
  2349. 0, NULL);
  2350. if (!btrfs_transaction_cachep)
  2351. goto fail;
  2352. btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
  2353. sizeof(struct btrfs_path),
  2354. 0, NULL);
  2355. if (!btrfs_path_cachep)
  2356. goto fail;
  2357. btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
  2358. SLAB_DESTROY_BY_RCU, NULL);
  2359. if (!btrfs_bit_radix_cachep)
  2360. goto fail;
  2361. return 0;
  2362. fail:
  2363. btrfs_destroy_cachep();
  2364. return -ENOMEM;
  2365. }
  2366. static int btrfs_getattr(struct vfsmount *mnt,
  2367. struct dentry *dentry, struct kstat *stat)
  2368. {
  2369. struct inode *inode = dentry->d_inode;
  2370. generic_fillattr(inode, stat);
  2371. stat->blksize = PAGE_CACHE_SIZE;
  2372. return 0;
  2373. }
2374. static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2375. struct inode *new_dir, struct dentry *new_dentry)
2376. {
  2377. struct btrfs_trans_handle *trans;
  2378. struct btrfs_root *root = BTRFS_I(old_dir)->root;
  2379. struct inode *new_inode = new_dentry->d_inode;
  2380. struct inode *old_inode = old_dentry->d_inode;
  2381. struct timespec ctime = CURRENT_TIME;
  2382. struct btrfs_path *path;
  2383. int ret;
  2384. if (S_ISDIR(old_inode->i_mode) && new_inode &&
  2385. new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
  2386. return -ENOTEMPTY;
  2387. }
  2388. mutex_lock(&root->fs_info->fs_mutex);
  2389. ret = btrfs_check_free_space(root, 1, 0);
  2390. if (ret)
  2391. goto out_unlock;
  2392. trans = btrfs_start_transaction(root, 1);
  2393. btrfs_set_trans_block_group(trans, new_dir);
  2394. path = btrfs_alloc_path();
  2395. if (!path) {
  2396. ret = -ENOMEM;
  2397. goto out_fail;
  2398. }
  2399. old_dentry->d_inode->i_nlink++;
  2400. old_dir->i_ctime = old_dir->i_mtime = ctime;
  2401. new_dir->i_ctime = new_dir->i_mtime = ctime;
  2402. old_inode->i_ctime = ctime;
  2403. ret = btrfs_unlink_trans(trans, root, old_dir, old_dentry);
  2404. if (ret)
  2405. goto out_fail;
  2406. if (new_inode) {
  2407. new_inode->i_ctime = CURRENT_TIME;
  2408. ret = btrfs_unlink_trans(trans, root, new_dir, new_dentry);
  2409. if (ret)
  2410. goto out_fail;
  2411. }
  2412. ret = btrfs_add_link(trans, new_dentry, old_inode);
  2413. if (ret)
  2414. goto out_fail;
  2415. out_fail:
  2416. btrfs_free_path(path);
  2417. btrfs_end_transaction(trans, root);
  2418. out_unlock:
  2419. mutex_unlock(&root->fs_info->fs_mutex);
  2420. return ret;
  2421. }
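/*
 * Symlinks are created like regular files, with the target path then
 * stored as an inline file extent.
 */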
  2422. static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
  2423. const char *symname)
  2424. {
  2425. struct btrfs_trans_handle *trans;
  2426. struct btrfs_root *root = BTRFS_I(dir)->root;
  2427. struct btrfs_path *path;
  2428. struct btrfs_key key;
  2429. struct inode *inode = NULL;
  2430. int err;
  2431. int drop_inode = 0;
  2432. u64 objectid;
  2433. int name_len;
  2434. int datasize;
  2435. unsigned long ptr;
  2436. struct btrfs_file_extent_item *ei;
  2437. struct extent_buffer *leaf;
  2438. unsigned long nr = 0;
  2439. name_len = strlen(symname) + 1;
  2440. if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
  2441. return -ENAMETOOLONG;
  2442. mutex_lock(&root->fs_info->fs_mutex);
  2443. err = btrfs_check_free_space(root, 1, 0);
  2444. if (err)
  2445. goto out_fail;
  2446. trans = btrfs_start_transaction(root, 1);
  2447. btrfs_set_trans_block_group(trans, dir);
  2448. err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
  2449. if (err) {
  2450. err = -ENOSPC;
  2451. goto out_unlock;
  2452. }
  2453. inode = btrfs_new_inode(trans, root, objectid,
  2454. BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO);
  2455. err = PTR_ERR(inode);
  2456. if (IS_ERR(inode))
  2457. goto out_unlock;
  2458. btrfs_set_trans_block_group(trans, inode);
  2459. err = btrfs_add_nondir(trans, dentry, inode);
  2460. if (err)
  2461. drop_inode = 1;
  2462. else {
  2463. inode->i_mapping->a_ops = &btrfs_aops;
  2464. inode->i_fop = &btrfs_file_operations;
  2465. inode->i_op = &btrfs_file_inode_operations;
  2466. extent_map_tree_init(&BTRFS_I(inode)->extent_tree,
  2467. inode->i_mapping, GFP_NOFS);
  2468. BTRFS_I(inode)->extent_tree.ops = &btrfs_extent_map_ops;
  2469. }
  2470. dir->i_sb->s_dirt = 1;
  2471. btrfs_update_inode_block_group(trans, inode);
  2472. btrfs_update_inode_block_group(trans, dir);
  2473. if (drop_inode)
  2474. goto out_unlock;
  2475. path = btrfs_alloc_path();
  2476. BUG_ON(!path);
  2477. key.objectid = inode->i_ino;
  2478. key.offset = 0;
  2479. btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
  2480. datasize = btrfs_file_extent_calc_inline_size(name_len);
  2481. err = btrfs_insert_empty_item(trans, root, path, &key,
  2482. datasize);
  2483. if (err) {
  2484. drop_inode = 1;
  2485. goto out_unlock;
  2486. }
  2487. leaf = path->nodes[0];
  2488. ei = btrfs_item_ptr(leaf, path->slots[0],
  2489. struct btrfs_file_extent_item);
  2490. btrfs_set_file_extent_generation(leaf, ei, trans->transid);
  2491. btrfs_set_file_extent_type(leaf, ei,
  2492. BTRFS_FILE_EXTENT_INLINE);
  2493. ptr = btrfs_file_extent_inline_start(ei);
  2494. write_extent_buffer(leaf, symname, ptr, name_len);
  2495. btrfs_mark_buffer_dirty(leaf);
  2496. btrfs_free_path(path);
  2497. inode->i_op = &btrfs_symlink_inode_operations;
  2498. inode->i_mapping->a_ops = &btrfs_symlink_aops;
  2499. inode->i_size = name_len - 1;
  2500. err = btrfs_update_inode(trans, root, inode);
  2501. if (err)
  2502. drop_inode = 1;
  2503. out_unlock:
  2504. nr = trans->blocks_used;
  2505. btrfs_end_transaction(trans, root);
  2506. out_fail:
  2507. mutex_unlock(&root->fs_info->fs_mutex);
  2508. if (drop_inode) {
  2509. inode_dec_link_count(inode);
  2510. iput(inode);
  2511. }
  2512. btrfs_btree_balance_dirty(root, nr);
  2513. btrfs_throttle(root);
  2514. return err;
  2515. }
  2516. static struct inode_operations btrfs_dir_inode_operations = {
  2517. .lookup = btrfs_lookup,
  2518. .create = btrfs_create,
  2519. .unlink = btrfs_unlink,
  2520. .link = btrfs_link,
  2521. .mkdir = btrfs_mkdir,
  2522. .rmdir = btrfs_rmdir,
  2523. .rename = btrfs_rename,
  2524. .symlink = btrfs_symlink,
  2525. .setattr = btrfs_setattr,
  2526. .mknod = btrfs_mknod,
  2527. .setxattr = generic_setxattr,
  2528. .getxattr = generic_getxattr,
  2529. .listxattr = btrfs_listxattr,
  2530. .removexattr = generic_removexattr,
  2531. };
  2532. static struct inode_operations btrfs_dir_ro_inode_operations = {
  2533. .lookup = btrfs_lookup,
  2534. };
  2535. static struct file_operations btrfs_dir_file_operations = {
  2536. .llseek = generic_file_llseek,
  2537. .read = generic_read_dir,
  2538. .readdir = btrfs_readdir,
  2539. .unlocked_ioctl = btrfs_ioctl,
  2540. #ifdef CONFIG_COMPAT
  2541. .compat_ioctl = btrfs_ioctl,
  2542. #endif
  2543. };
  2544. static struct extent_map_ops btrfs_extent_map_ops = {
  2545. .fill_delalloc = run_delalloc_range,
  2546. .writepage_io_hook = btrfs_writepage_io_hook,
  2547. .readpage_io_hook = btrfs_readpage_io_hook,
  2548. .readpage_end_io_hook = btrfs_readpage_end_io_hook,
  2549. };
  2550. static struct address_space_operations btrfs_aops = {
  2551. .readpage = btrfs_readpage,
  2552. .writepage = btrfs_writepage,
  2553. .writepages = btrfs_writepages,
  2554. .readpages = btrfs_readpages,
  2555. .sync_page = block_sync_page,
  2556. .bmap = btrfs_bmap,
  2557. .invalidatepage = btrfs_invalidatepage,
  2558. .releasepage = btrfs_releasepage,
  2559. .set_page_dirty = __set_page_dirty_nobuffers,
  2560. };
  2561. static struct address_space_operations btrfs_symlink_aops = {
  2562. .readpage = btrfs_readpage,
  2563. .writepage = btrfs_writepage,
  2564. .invalidatepage = btrfs_invalidatepage,
  2565. .releasepage = btrfs_releasepage,
  2566. };
  2567. static struct inode_operations btrfs_file_inode_operations = {
  2568. .truncate = btrfs_truncate,
  2569. .getattr = btrfs_getattr,
  2570. .setattr = btrfs_setattr,
  2571. .setxattr = generic_setxattr,
  2572. .getxattr = generic_getxattr,
  2573. .listxattr = btrfs_listxattr,
  2574. .removexattr = generic_removexattr,
  2575. };
  2576. static struct inode_operations btrfs_special_inode_operations = {
  2577. .getattr = btrfs_getattr,
  2578. .setattr = btrfs_setattr,
  2579. };
  2580. static struct inode_operations btrfs_symlink_inode_operations = {
  2581. .readlink = generic_readlink,
  2582. .follow_link = page_follow_link_light,
  2583. .put_link = page_put_link,
  2584. };