extents.c

/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

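/*
 * ext4_ext_truncate_extend_restart:
 * make sure @handle has at least @needed journal credits.  If the running
 * transaction cannot be extended, restart it and return -EAGAIN so the
 * caller can re-read the path and retry.
 */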
static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;

	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

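/*
 * ext4_ext_find_goal:
 * pick a heuristic "goal" physical block for a new allocation at logical
 * block @block: prefer a block adjacent to the extent found in @path,
 * then the index block itself, and finally fall back to a coloured
 * offset inside the inode's block group.
 */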
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
}

/*
 * Allocation for a metadata block.
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

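/*
 * The four helpers below compute how many extents or index entries fit
 * into a leaf/index block and into the space available in the inode
 * body.  With AGGRESSIVE_TEST defined the capacities are artificially
 * capped so that deep trees can be exercised in testing.
 */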
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @lblock.
 * Worst case is one block per extent.
 */
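/*
 * Example: with a 4KiB block size, and the 12-byte on-disk header and
 * 12-byte index entry, idxs = (4096 - 12) / 12 = 340.  A contiguous
 * delalloc run therefore needs one new index block every 340 leaf
 * blocks, an extra second-level index every 340^2 blocks, and so on.
 */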
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs, num = 0;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

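/*
 * Sanity checks used by __ext4_ext_check(): verify that an extent's
 * (or an index entry's) physical block range lies inside the
 * filesystem's data area.
 */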
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			"bad header/extent: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)

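/*
 * ext4_ext_check_inode:
 * validate the extent tree header stored in the inode body itself.
 */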
int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif

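/*
 * ext4_ext_drop_refs:
 * release the buffer heads referenced by @path after a tree walk; the
 * path array itself is left for the caller to free or reuse.
 */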
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif
}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif
}

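/*
 * ext4_ext_tree_init:
 * write an empty extent-tree root into the inode body.
 */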
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

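/*
 * ext4_ext_find_extent:
 * walk the tree from the root in the inode body down to the leaf that
 * should contain @block, recording one ext4_ext_path element per level.
 * On success the caller owns the returned path and must release it with
 * ext4_ext_drop_refs() (and kfree() it if it was allocated here).
 */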
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
						path[ppos].p_block);
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d > eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}
	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the filesystem
	 * read-only.  The index won't be inserted and the tree will stay
	 * in a consistent state.  The next mount will repair buffers too.
	 */

	/*
	 * Get an array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext4_ext_pblock(path[depth].p_ext),
				ext4_ext_is_uninitialized(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					ext4_idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags,
				 struct ext4_ext_path *path,
				 struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, path,
		newext, &err, flags);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int flags,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree looking for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, flags,
					    path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? ix->ei_block : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
				    EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	put_bh(bh);
	return 0;
}

  1234. /*
  1235. * ext4_ext_next_allocated_block:
  1236. * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
  1237. * NOTE: it considers block number from index entry as
  1238. * allocated block. Thus, index entries have to be consistent
  1239. * with leaves.
  1240. */
  1241. static ext4_lblk_t
  1242. ext4_ext_next_allocated_block(struct ext4_ext_path *path)
  1243. {
  1244. int depth;
  1245. BUG_ON(path == NULL);
  1246. depth = path->p_depth;
  1247. if (depth == 0 && path->p_ext == NULL)
  1248. return EXT_MAX_BLOCK;
  1249. while (depth >= 0) {
  1250. if (depth == path->p_depth) {
  1251. /* leaf */
  1252. if (path[depth].p_ext !=
  1253. EXT_LAST_EXTENT(path[depth].p_hdr))
  1254. return le32_to_cpu(path[depth].p_ext[1].ee_block);
  1255. } else {
  1256. /* index */
  1257. if (path[depth].p_idx !=
  1258. EXT_LAST_INDEX(path[depth].p_hdr))
  1259. return le32_to_cpu(path[depth].p_idx[1].ei_block);
  1260. }
  1261. depth--;
  1262. }
  1263. return EXT_MAX_BLOCK;
  1264. }
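/*
 * Editor's illustration (not part of extents.c): a minimal userspace
 * sketch of the scan above, over a simplified path model. The names
 * demo_path/demo_next_allocated are hypothetical, and DEMO_MAX_BLOCK
 * stands in for EXT_MAX_BLOCK.
 */
#include <stdint.h>

#define DEMO_MAX_BLOCK 0xffffffffU

struct demo_path {
	uint32_t *entries;   /* starting lblocks of entries at this level */
	int cur;             /* index of the entry the path points at */
	int nr;              /* number of entries in this block */
};

/* Walk from the leaf (level == depth) toward the root (level 0); the first
 * level whose current entry has a right sibling yields the answer. */
static uint32_t demo_next_allocated(struct demo_path *path, int depth)
{
	for (; depth >= 0; depth--)
		if (path[depth].cur + 1 < path[depth].nr)
			return path[depth].entries[path[depth].cur + 1];
	return DEMO_MAX_BLOCK;
}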
  1265. /*
  1266. * ext4_ext_next_leaf_block:
  1267. * returns first allocated block from next leaf or EXT_MAX_BLOCK
  1268. */
  1269. static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
  1270. struct ext4_ext_path *path)
  1271. {
  1272. int depth;
  1273. BUG_ON(path == NULL);
  1274. depth = path->p_depth;
  1275. /* zero-tree has no leaf blocks at all */
  1276. if (depth == 0)
  1277. return EXT_MAX_BLOCK;
  1278. /* go to index block */
  1279. depth--;
  1280. while (depth >= 0) {
  1281. if (path[depth].p_idx !=
  1282. EXT_LAST_INDEX(path[depth].p_hdr))
  1283. return (ext4_lblk_t)
  1284. le32_to_cpu(path[depth].p_idx[1].ei_block);
  1285. depth--;
  1286. }
  1287. return EXT_MAX_BLOCK;
  1288. }
  1289. /*
  1290. * ext4_ext_correct_indexes:
1291. * if a leaf gets modified and the modified extent is first in the leaf,
1292. * then we have to correct all indexes above.
  1293. * TODO: do we need to correct tree in all cases?
  1294. */
  1295. static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
  1296. struct ext4_ext_path *path)
  1297. {
  1298. struct ext4_extent_header *eh;
  1299. int depth = ext_depth(inode);
  1300. struct ext4_extent *ex;
  1301. __le32 border;
  1302. int k, err = 0;
  1303. eh = path[depth].p_hdr;
  1304. ex = path[depth].p_ext;
  1305. if (unlikely(ex == NULL || eh == NULL)) {
  1306. EXT4_ERROR_INODE(inode,
  1307. "ex %p == NULL or eh %p == NULL", ex, eh);
  1308. return -EIO;
  1309. }
  1310. if (depth == 0) {
  1311. /* there is no tree at all */
  1312. return 0;
  1313. }
  1314. if (ex != EXT_FIRST_EXTENT(eh)) {
1315. /* we correct the tree only if the first leaf got modified */
  1316. return 0;
  1317. }
  1318. /*
  1319. * TODO: we need correction if border is smaller than current one
  1320. */
  1321. k = depth - 1;
  1322. border = path[depth].p_ext->ee_block;
  1323. err = ext4_ext_get_access(handle, inode, path + k);
  1324. if (err)
  1325. return err;
  1326. path[k].p_idx->ei_block = border;
  1327. err = ext4_ext_dirty(handle, inode, path + k);
  1328. if (err)
  1329. return err;
  1330. while (k--) {
  1331. /* change all left-side indexes */
  1332. if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
  1333. break;
  1334. err = ext4_ext_get_access(handle, inode, path + k);
  1335. if (err)
  1336. break;
  1337. path[k].p_idx->ei_block = border;
  1338. err = ext4_ext_dirty(handle, inode, path + k);
  1339. if (err)
  1340. break;
  1341. }
  1342. return err;
  1343. }
  1344. int
  1345. ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
  1346. struct ext4_extent *ex2)
  1347. {
  1348. unsigned short ext1_ee_len, ext2_ee_len, max_len;
  1349. /*
  1350. * Make sure that either both extents are uninitialized, or
  1351. * both are _not_.
  1352. */
  1353. if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
  1354. return 0;
  1355. if (ext4_ext_is_uninitialized(ex1))
  1356. max_len = EXT_UNINIT_MAX_LEN;
  1357. else
  1358. max_len = EXT_INIT_MAX_LEN;
  1359. ext1_ee_len = ext4_ext_get_actual_len(ex1);
  1360. ext2_ee_len = ext4_ext_get_actual_len(ex2);
  1361. if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
  1362. le32_to_cpu(ex2->ee_block))
  1363. return 0;
  1364. /*
  1365. * To allow future support for preallocated extents to be added
1366. * as an RO_COMPAT feature, refuse to merge two extents if
  1367. * this can result in the top bit of ee_len being set.
  1368. */
  1369. if (ext1_ee_len + ext2_ee_len > max_len)
  1370. return 0;
  1371. #ifdef AGGRESSIVE_TEST
  1372. if (ext1_ee_len >= 4)
  1373. return 0;
  1374. #endif
  1375. if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
  1376. return 1;
  1377. return 0;
  1378. }
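/*
 * Editor's illustration (not part of extents.c): the mergeability test
 * above boils down to four checks. A hedged userspace sketch, with a
 * hypothetical demo_extent type holding already-decoded fields.
 */
#include <stdint.h>

struct demo_extent {
	uint32_t lblock;   /* first logical block */
	uint64_t pblock;   /* first physical block */
	uint16_t len;      /* actual length in blocks */
	int uninit;        /* uninitialized (preallocated) extent? */
};

static int demo_can_merge(const struct demo_extent *e1,
			  const struct demo_extent *e2,
			  unsigned max_len)
{
	if (e1->uninit != e2->uninit)              /* same init state */
		return 0;
	if (e1->lblock + e1->len != e2->lblock)    /* logically adjacent */
		return 0;
	if (e1->len + e2->len > max_len)           /* combined len representable */
		return 0;
	return e1->pblock + e1->len == e2->pblock; /* physically adjacent */
}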
  1379. /*
  1380. * This function tries to merge the "ex" extent to the next extent in the tree.
  1381. * It always tries to merge towards right. If you want to merge towards
  1382. * left, pass "ex - 1" as argument instead of "ex".
  1383. * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
  1384. * 1 if they got merged.
  1385. */
  1386. static int ext4_ext_try_to_merge_right(struct inode *inode,
  1387. struct ext4_ext_path *path,
  1388. struct ext4_extent *ex)
  1389. {
  1390. struct ext4_extent_header *eh;
  1391. unsigned int depth, len;
  1392. int merge_done = 0;
  1393. int uninitialized = 0;
  1394. depth = ext_depth(inode);
  1395. BUG_ON(path[depth].p_hdr == NULL);
  1396. eh = path[depth].p_hdr;
  1397. while (ex < EXT_LAST_EXTENT(eh)) {
  1398. if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
  1399. break;
  1400. /* merge with next extent! */
  1401. if (ext4_ext_is_uninitialized(ex))
  1402. uninitialized = 1;
  1403. ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
  1404. + ext4_ext_get_actual_len(ex + 1));
  1405. if (uninitialized)
  1406. ext4_ext_mark_uninitialized(ex);
  1407. if (ex + 1 < EXT_LAST_EXTENT(eh)) {
  1408. len = (EXT_LAST_EXTENT(eh) - ex - 1)
  1409. * sizeof(struct ext4_extent);
  1410. memmove(ex + 1, ex + 2, len);
  1411. }
  1412. le16_add_cpu(&eh->eh_entries, -1);
  1413. merge_done = 1;
  1414. WARN_ON(eh->eh_entries == 0);
  1415. if (!eh->eh_entries)
  1416. EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
  1417. }
  1418. return merge_done;
  1419. }
  1420. /*
  1421. * This function tries to merge the @ex extent to neighbours in the tree.
1422. * Returns 1 if @ex was merged rightwards; a successful left merge returns 0.
  1423. */
  1424. static int ext4_ext_try_to_merge(struct inode *inode,
  1425. struct ext4_ext_path *path,
  1426. struct ext4_extent *ex) {
  1427. struct ext4_extent_header *eh;
  1428. unsigned int depth;
  1429. int merge_done = 0;
  1430. int ret = 0;
  1431. depth = ext_depth(inode);
  1432. BUG_ON(path[depth].p_hdr == NULL);
  1433. eh = path[depth].p_hdr;
  1434. if (ex > EXT_FIRST_EXTENT(eh))
  1435. merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
  1436. if (!merge_done)
  1437. ret = ext4_ext_try_to_merge_right(inode, path, ex);
  1438. return ret;
  1439. }
  1440. /*
  1441. * check if a portion of the "newext" extent overlaps with an
  1442. * existing extent.
  1443. *
  1444. * If there is an overlap discovered, it updates the length of the newext
  1445. * such that there will be no overlap, and then returns 1.
  1446. * If there is no overlap found, it returns 0.
  1447. */
  1448. static unsigned int ext4_ext_check_overlap(struct inode *inode,
  1449. struct ext4_extent *newext,
  1450. struct ext4_ext_path *path)
  1451. {
  1452. ext4_lblk_t b1, b2;
  1453. unsigned int depth, len1;
  1454. unsigned int ret = 0;
  1455. b1 = le32_to_cpu(newext->ee_block);
  1456. len1 = ext4_ext_get_actual_len(newext);
  1457. depth = ext_depth(inode);
  1458. if (!path[depth].p_ext)
  1459. goto out;
  1460. b2 = le32_to_cpu(path[depth].p_ext->ee_block);
  1461. /*
  1462. * get the next allocated block if the extent in the path
  1463. * is before the requested block(s)
  1464. */
  1465. if (b2 < b1) {
  1466. b2 = ext4_ext_next_allocated_block(path);
  1467. if (b2 == EXT_MAX_BLOCK)
  1468. goto out;
  1469. }
  1470. /* check for wrap through zero on extent logical start block*/
  1471. if (b1 + len1 < b1) {
  1472. len1 = EXT_MAX_BLOCK - b1;
  1473. newext->ee_len = cpu_to_le16(len1);
  1474. ret = 1;
  1475. }
  1476. /* check for overlap */
  1477. if (b1 + len1 > b2) {
  1478. newext->ee_len = cpu_to_le16(b2 - b1);
  1479. ret = 1;
  1480. }
  1481. out:
  1482. return ret;
  1483. }
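/*
 * Editor's illustration (not part of extents.c): the overlap check above
 * clamps a proposed [b1, b1+len1) range against the next allocated block
 * b2. A small sketch under that assumption; demo_clamp_len is hypothetical
 * and max_block stands in for EXT_MAX_BLOCK.
 */
#include <stdint.h>

/* Returns the (possibly shortened) length; *changed is set if clamped. */
static uint32_t demo_clamp_len(uint32_t b1, uint32_t len1, uint32_t b2,
			       uint32_t max_block, int *changed)
{
	*changed = 0;
	if (b1 + len1 < b1) {            /* wrap through zero */
		len1 = max_block - b1;
		*changed = 1;
	}
	if (b1 + len1 > b2) {            /* would overlap the next extent */
		len1 = b2 - b1;
		*changed = 1;
	}
	return len1;
}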
  1484. /*
  1485. * ext4_ext_insert_extent:
1486. * tries to merge the requested extent into the existing extent or
  1487. * inserts requested extent as new one into the tree,
  1488. * creating new leaf in the no-space case.
  1489. */
  1490. int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
  1491. struct ext4_ext_path *path,
  1492. struct ext4_extent *newext, int flag)
  1493. {
  1494. struct ext4_extent_header *eh;
  1495. struct ext4_extent *ex, *fex;
  1496. struct ext4_extent *nearex; /* nearest extent */
  1497. struct ext4_ext_path *npath = NULL;
  1498. int depth, len, err;
  1499. ext4_lblk_t next;
  1500. unsigned uninitialized = 0;
  1501. int flags = 0;
  1502. if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
  1503. EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
  1504. return -EIO;
  1505. }
  1506. depth = ext_depth(inode);
  1507. ex = path[depth].p_ext;
  1508. if (unlikely(path[depth].p_hdr == NULL)) {
  1509. EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
  1510. return -EIO;
  1511. }
  1512. /* try to insert block into found extent and return */
  1513. if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
  1514. && ext4_can_extents_be_merged(inode, ex, newext)) {
  1515. ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
  1516. ext4_ext_is_uninitialized(newext),
  1517. ext4_ext_get_actual_len(newext),
  1518. le32_to_cpu(ex->ee_block),
  1519. ext4_ext_is_uninitialized(ex),
  1520. ext4_ext_get_actual_len(ex),
  1521. ext4_ext_pblock(ex));
  1522. err = ext4_ext_get_access(handle, inode, path + depth);
  1523. if (err)
  1524. return err;
  1525. /*
  1526. * ext4_can_extents_be_merged should have checked that either
  1527. * both extents are uninitialized, or both aren't. Thus we
  1528. * need to check only one of them here.
  1529. */
  1530. if (ext4_ext_is_uninitialized(ex))
  1531. uninitialized = 1;
  1532. ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
  1533. + ext4_ext_get_actual_len(newext));
  1534. if (uninitialized)
  1535. ext4_ext_mark_uninitialized(ex);
  1536. eh = path[depth].p_hdr;
  1537. nearex = ex;
  1538. goto merge;
  1539. }
  1540. repeat:
  1541. depth = ext_depth(inode);
  1542. eh = path[depth].p_hdr;
  1543. if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
  1544. goto has_space;
  1545. /* probably next leaf has space for us? */
  1546. fex = EXT_LAST_EXTENT(eh);
  1547. next = ext4_ext_next_leaf_block(inode, path);
  1548. if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
  1549. && next != EXT_MAX_BLOCK) {
  1550. ext_debug("next leaf block - %d\n", next);
  1551. BUG_ON(npath != NULL);
  1552. npath = ext4_ext_find_extent(inode, next, NULL);
  1553. if (IS_ERR(npath))
  1554. return PTR_ERR(npath);
  1555. BUG_ON(npath->p_depth != path->p_depth);
  1556. eh = npath[depth].p_hdr;
  1557. if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
  1558. ext_debug("next leaf isn't full(%d)\n",
  1559. le16_to_cpu(eh->eh_entries));
  1560. path = npath;
  1561. goto repeat;
  1562. }
  1563. ext_debug("next leaf has no free space(%d,%d)\n",
  1564. le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
  1565. }
  1566. /*
  1567. * There is no free space in the found leaf.
  1568. * We're gonna add a new leaf in the tree.
  1569. */
  1570. if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
  1571. flags = EXT4_MB_USE_ROOT_BLOCKS;
  1572. err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
  1573. if (err)
  1574. goto cleanup;
  1575. depth = ext_depth(inode);
  1576. eh = path[depth].p_hdr;
  1577. has_space:
  1578. nearex = path[depth].p_ext;
  1579. err = ext4_ext_get_access(handle, inode, path + depth);
  1580. if (err)
  1581. goto cleanup;
  1582. if (!nearex) {
  1583. /* there is no extent in this leaf, create first one */
  1584. ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
  1585. le32_to_cpu(newext->ee_block),
  1586. ext4_ext_pblock(newext),
  1587. ext4_ext_is_uninitialized(newext),
  1588. ext4_ext_get_actual_len(newext));
  1589. path[depth].p_ext = EXT_FIRST_EXTENT(eh);
  1590. } else if (le32_to_cpu(newext->ee_block)
  1591. > le32_to_cpu(nearex->ee_block)) {
  1592. /* BUG_ON(newext->ee_block == nearex->ee_block); */
  1593. if (nearex != EXT_LAST_EXTENT(eh)) {
  1594. len = EXT_MAX_EXTENT(eh) - nearex;
  1595. len = (len - 1) * sizeof(struct ext4_extent);
  1596. len = len < 0 ? 0 : len;
  1597. ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
  1598. "move %d from 0x%p to 0x%p\n",
  1599. le32_to_cpu(newext->ee_block),
  1600. ext4_ext_pblock(newext),
  1601. ext4_ext_is_uninitialized(newext),
  1602. ext4_ext_get_actual_len(newext),
  1603. nearex, len, nearex + 1, nearex + 2);
  1604. memmove(nearex + 2, nearex + 1, len);
  1605. }
  1606. path[depth].p_ext = nearex + 1;
  1607. } else {
  1608. BUG_ON(newext->ee_block == nearex->ee_block);
  1609. len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
  1610. len = len < 0 ? 0 : len;
  1611. ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
  1612. "move %d from 0x%p to 0x%p\n",
  1613. le32_to_cpu(newext->ee_block),
  1614. ext4_ext_pblock(newext),
  1615. ext4_ext_is_uninitialized(newext),
  1616. ext4_ext_get_actual_len(newext),
  1617. nearex, len, nearex + 1, nearex + 2);
  1618. memmove(nearex + 1, nearex, len);
  1619. path[depth].p_ext = nearex;
  1620. }
  1621. le16_add_cpu(&eh->eh_entries, 1);
  1622. nearex = path[depth].p_ext;
  1623. nearex->ee_block = newext->ee_block;
  1624. ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
  1625. nearex->ee_len = newext->ee_len;
  1626. merge:
  1627. /* try to merge extents to the right */
  1628. if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
  1629. ext4_ext_try_to_merge(inode, path, nearex);
  1630. /* try to merge extents to the left */
  1631. /* time to correct all indexes above */
  1632. err = ext4_ext_correct_indexes(handle, inode, path);
  1633. if (err)
  1634. goto cleanup;
  1635. err = ext4_ext_dirty(handle, inode, path + depth);
  1636. cleanup:
  1637. if (npath) {
  1638. ext4_ext_drop_refs(npath);
  1639. kfree(npath);
  1640. }
  1641. ext4_ext_invalidate_cache(inode);
  1642. return err;
  1643. }
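/*
 * Editor's illustration (not part of extents.c): the has_space path above
 * is an ordinary sorted-array insert done with memmove. A freestanding
 * sketch with a hypothetical uint32_t array standing in for the leaf.
 */
#include <string.h>
#include <stdint.h>

static void demo_insert_sorted(uint32_t *arr, int *nr, int pos, uint32_t val)
{
	/* shift entries at pos..nr-1 one slot right, then drop val in */
	memmove(arr + pos + 1, arr + pos, (*nr - pos) * sizeof(*arr));
	arr[pos] = val;
	(*nr)++;
}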
  1644. static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
  1645. ext4_lblk_t num, ext_prepare_callback func,
  1646. void *cbdata)
  1647. {
  1648. struct ext4_ext_path *path = NULL;
  1649. struct ext4_ext_cache cbex;
  1650. struct ext4_extent *ex;
  1651. ext4_lblk_t next, start = 0, end = 0;
  1652. ext4_lblk_t last = block + num;
  1653. int depth, exists, err = 0;
  1654. BUG_ON(func == NULL);
  1655. BUG_ON(inode == NULL);
  1656. while (block < last && block != EXT_MAX_BLOCK) {
  1657. num = last - block;
  1658. /* find extent for this block */
  1659. down_read(&EXT4_I(inode)->i_data_sem);
  1660. path = ext4_ext_find_extent(inode, block, path);
  1661. up_read(&EXT4_I(inode)->i_data_sem);
  1662. if (IS_ERR(path)) {
  1663. err = PTR_ERR(path);
  1664. path = NULL;
  1665. break;
  1666. }
  1667. depth = ext_depth(inode);
  1668. if (unlikely(path[depth].p_hdr == NULL)) {
  1669. EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
  1670. err = -EIO;
  1671. break;
  1672. }
  1673. ex = path[depth].p_ext;
  1674. next = ext4_ext_next_allocated_block(path);
  1675. exists = 0;
  1676. if (!ex) {
  1677. /* there is no extent yet, so try to allocate
  1678. * all requested space */
  1679. start = block;
  1680. end = block + num;
  1681. } else if (le32_to_cpu(ex->ee_block) > block) {
  1682. /* need to allocate space before found extent */
  1683. start = block;
  1684. end = le32_to_cpu(ex->ee_block);
  1685. if (block + num < end)
  1686. end = block + num;
  1687. } else if (block >= le32_to_cpu(ex->ee_block)
  1688. + ext4_ext_get_actual_len(ex)) {
  1689. /* need to allocate space after found extent */
  1690. start = block;
  1691. end = block + num;
  1692. if (end >= next)
  1693. end = next;
  1694. } else if (block >= le32_to_cpu(ex->ee_block)) {
  1695. /*
  1696. * some part of requested space is covered
  1697. * by found extent
  1698. */
  1699. start = block;
  1700. end = le32_to_cpu(ex->ee_block)
  1701. + ext4_ext_get_actual_len(ex);
  1702. if (block + num < end)
  1703. end = block + num;
  1704. exists = 1;
  1705. } else {
  1706. BUG();
  1707. }
  1708. BUG_ON(end <= start);
  1709. if (!exists) {
  1710. cbex.ec_block = start;
  1711. cbex.ec_len = end - start;
  1712. cbex.ec_start = 0;
  1713. } else {
  1714. cbex.ec_block = le32_to_cpu(ex->ee_block);
  1715. cbex.ec_len = ext4_ext_get_actual_len(ex);
  1716. cbex.ec_start = ext4_ext_pblock(ex);
  1717. }
  1718. if (unlikely(cbex.ec_len == 0)) {
  1719. EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
  1720. err = -EIO;
  1721. break;
  1722. }
  1723. err = func(inode, path, &cbex, ex, cbdata);
  1724. ext4_ext_drop_refs(path);
  1725. if (err < 0)
  1726. break;
  1727. if (err == EXT_REPEAT)
  1728. continue;
  1729. else if (err == EXT_BREAK) {
  1730. err = 0;
  1731. break;
  1732. }
  1733. if (ext_depth(inode) != depth) {
  1734. /* depth was changed. we have to realloc path */
  1735. kfree(path);
  1736. path = NULL;
  1737. }
  1738. block = cbex.ec_block + cbex.ec_len;
  1739. }
  1740. if (path) {
  1741. ext4_ext_drop_refs(path);
  1742. kfree(path);
  1743. }
  1744. return err;
  1745. }
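/*
 * Editor's illustration (not part of extents.c): the walker above visits
 * alternating "mapped" and "hole" ranges and hands each to a callback that
 * can continue or break (the real EXT_REPEAT case, where the callback has
 * modified the tree first, is omitted here). All demo_* names are
 * hypothetical stand-ins.
 */
typedef int (*demo_cb)(unsigned start, unsigned len, int mapped, void *priv);

enum { DEMO_CONTINUE, DEMO_BREAK };

static int demo_walk(unsigned block, unsigned last, demo_cb cb, void *priv)
{
	while (block < last) {
		unsigned len = 4;           /* stand-in for extent lookup */
		int mapped = block & 1;     /* stand-in for "covered by extent" */
		if (len > last - block)
			len = last - block;
		if (cb(block, len, mapped, priv) == DEMO_BREAK)
			break;
		block += len;
	}
	return 0;
}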
  1746. static void
  1747. ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
  1748. __u32 len, ext4_fsblk_t start)
  1749. {
  1750. struct ext4_ext_cache *cex;
  1751. BUG_ON(len == 0);
  1752. spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
  1753. cex = &EXT4_I(inode)->i_cached_extent;
  1754. cex->ec_block = block;
  1755. cex->ec_len = len;
  1756. cex->ec_start = start;
  1757. spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
  1758. }
  1759. /*
  1760. * ext4_ext_put_gap_in_cache:
  1761. * calculate boundaries of the gap that the requested block fits into
  1762. * and cache this gap
  1763. */
  1764. static void
  1765. ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
  1766. ext4_lblk_t block)
  1767. {
  1768. int depth = ext_depth(inode);
  1769. unsigned long len;
  1770. ext4_lblk_t lblock;
  1771. struct ext4_extent *ex;
  1772. ex = path[depth].p_ext;
  1773. if (ex == NULL) {
  1774. /* there is no extent yet, so gap is [0;-] */
  1775. lblock = 0;
  1776. len = EXT_MAX_BLOCK;
  1777. ext_debug("cache gap(whole file):");
  1778. } else if (block < le32_to_cpu(ex->ee_block)) {
  1779. lblock = block;
  1780. len = le32_to_cpu(ex->ee_block) - block;
  1781. ext_debug("cache gap(before): %u [%u:%u]",
  1782. block,
  1783. le32_to_cpu(ex->ee_block),
  1784. ext4_ext_get_actual_len(ex));
  1785. } else if (block >= le32_to_cpu(ex->ee_block)
  1786. + ext4_ext_get_actual_len(ex)) {
  1787. ext4_lblk_t next;
  1788. lblock = le32_to_cpu(ex->ee_block)
  1789. + ext4_ext_get_actual_len(ex);
  1790. next = ext4_ext_next_allocated_block(path);
  1791. ext_debug("cache gap(after): [%u:%u] %u",
  1792. le32_to_cpu(ex->ee_block),
  1793. ext4_ext_get_actual_len(ex),
  1794. block);
  1795. BUG_ON(next == lblock);
  1796. len = next - lblock;
  1797. } else {
  1798. lblock = len = 0;
  1799. BUG();
  1800. }
  1801. ext_debug(" -> %u:%lu\n", lblock, len);
  1802. ext4_ext_put_in_cache(inode, lblock, len, 0);
  1803. }
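/*
 * Editor's illustration (not part of extents.c): the three gap cases above
 * (no extent at all / gap before the found extent / gap after it) reduce
 * to simple interval arithmetic. Hypothetical demo helper; `next` stands
 * for ext4_ext_next_allocated_block()'s result and max_block for
 * EXT_MAX_BLOCK.
 */
#include <stdint.h>

struct demo_gap { uint32_t start; uint32_t len; };

static struct demo_gap demo_gap_around(uint32_t block, int have_ext,
				       uint32_t ex_start, uint32_t ex_len,
				       uint32_t next, uint32_t max_block)
{
	struct demo_gap g;
	if (!have_ext) {                 /* no extent: gap is the whole file */
		g.start = 0;
		g.len = max_block;
	} else if (block < ex_start) {   /* gap before the found extent */
		g.start = block;
		g.len = ex_start - block;
	} else {                         /* gap after the found extent */
		g.start = ex_start + ex_len;
		g.len = next - g.start;
	}
	return g;
}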
  1804. /*
  1805. * Return 0 if cache is invalid; 1 if the cache is valid
  1806. */
  1807. static int
  1808. ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
  1809. struct ext4_extent *ex)
  1810. {
  1811. struct ext4_ext_cache *cex;
  1812. struct ext4_sb_info *sbi;
  1813. int ret = 0;
  1814. /*
  1815. * We borrow i_block_reservation_lock to protect i_cached_extent
  1816. */
  1817. spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
  1818. cex = &EXT4_I(inode)->i_cached_extent;
  1819. sbi = EXT4_SB(inode->i_sb);
  1820. /* has cache valid data? */
  1821. if (cex->ec_len == 0)
  1822. goto errout;
  1823. if (in_range(block, cex->ec_block, cex->ec_len)) {
  1824. ex->ee_block = cpu_to_le32(cex->ec_block);
  1825. ext4_ext_store_pblock(ex, cex->ec_start);
  1826. ex->ee_len = cpu_to_le16(cex->ec_len);
  1827. ext_debug("%u cached by %u:%u:%llu\n",
  1828. block,
  1829. cex->ec_block, cex->ec_len, cex->ec_start);
  1830. ret = 1;
  1831. }
  1832. errout:
  1833. if (!ret)
  1834. sbi->extent_cache_misses++;
  1835. else
  1836. sbi->extent_cache_hits++;
  1837. spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
  1838. return ret;
  1839. }
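/*
 * Editor's illustration (not part of extents.c): the cache hit test above
 * is a half-open range check. A one-line sketch; demo_in_range is a
 * hypothetical equivalent of the kernel's in_range() macro.
 */
#include <stdint.h>

static int demo_in_range(uint32_t b, uint32_t first, uint32_t len)
{
	return b >= first && b < first + len;   /* b inside [first, first+len) */
}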
  1840. /*
  1841. * ext4_ext_rm_idx:
1842. * removes the index from the index block.
1843. * It's used in the truncate case only, thus all requests are for
1844. * the last index in the block.
  1845. */
  1846. static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
  1847. struct ext4_ext_path *path)
  1848. {
  1849. int err;
  1850. ext4_fsblk_t leaf;
  1851. /* free index block */
  1852. path--;
  1853. leaf = ext4_idx_pblock(path->p_idx);
  1854. if (unlikely(path->p_hdr->eh_entries == 0)) {
  1855. EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
  1856. return -EIO;
  1857. }
  1858. err = ext4_ext_get_access(handle, inode, path);
  1859. if (err)
  1860. return err;
  1861. le16_add_cpu(&path->p_hdr->eh_entries, -1);
  1862. err = ext4_ext_dirty(handle, inode, path);
  1863. if (err)
  1864. return err;
  1865. ext_debug("index is empty, remove it, free block %llu\n", leaf);
  1866. ext4_free_blocks(handle, inode, NULL, leaf, 1,
  1867. EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
  1868. return err;
  1869. }
  1870. /*
  1871. * ext4_ext_calc_credits_for_single_extent:
1872. * This routine returns the max. credits needed to insert an extent
1873. * into the extent tree.
1874. * When passing the actual path, the caller should calculate credits
1875. * under i_data_sem.
  1876. */
  1877. int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
  1878. struct ext4_ext_path *path)
  1879. {
  1880. if (path) {
  1881. int depth = ext_depth(inode);
  1882. int ret = 0;
  1883. /* probably there is space in leaf? */
  1884. if (le16_to_cpu(path[depth].p_hdr->eh_entries)
  1885. < le16_to_cpu(path[depth].p_hdr->eh_max)) {
  1886. /*
1887. * There is some space in the leaf, so no
1888. * need to account for the leaf block credit;
1889. *
1890. * bitmaps and block group descriptor blocks
1891. * and other metadata blocks still need to be
1892. * accounted for.
  1893. */
  1894. /* 1 bitmap, 1 block group descriptor */
  1895. ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
  1896. return ret;
  1897. }
  1898. }
  1899. return ext4_chunk_trans_blocks(inode, nrblocks);
  1900. }
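/*
 * Editor's illustration (not part of extents.c): a hedged model of the
 * credit estimate above. EXT4_META_TRANS_BLOCKS and
 * ext4_chunk_trans_blocks() are modelled by the hypothetical meta_blocks
 * and chunk_blocks parameters.
 */
static int demo_single_extent_credits(int leaf_has_room, int meta_blocks,
				      int chunk_blocks)
{
	if (leaf_has_room)
		return 2 + meta_blocks; /* 1 bitmap + 1 group descriptor + metadata */
	return chunk_blocks;            /* worst case: full per-chunk estimate */
}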
  1901. /*
  1902. * How many index/leaf blocks need to change/allocate to modify nrblocks?
  1903. *
1904. * if nrblocks fit in a single extent (chunk flag is 1), then
1905. * in the worst case each tree level's index/leaf needs to be changed;
1906. * if the tree splits due to inserting a new extent, then the old
1907. * tree's index/leaf blocks need to be updated too
1908. *
1909. * If the nrblocks are discontiguous, they could cause
1910. * the whole tree to split more than once, but this is really rare.
  1911. */
  1912. int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
  1913. {
  1914. int index;
  1915. int depth = ext_depth(inode);
  1916. if (chunk)
  1917. index = depth * 2;
  1918. else
  1919. index = depth * 3;
  1920. return index;
  1921. }
  1922. static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
  1923. struct ext4_extent *ex,
  1924. ext4_lblk_t from, ext4_lblk_t to)
  1925. {
  1926. unsigned short ee_len = ext4_ext_get_actual_len(ex);
  1927. int flags = EXT4_FREE_BLOCKS_FORGET;
  1928. if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
  1929. flags |= EXT4_FREE_BLOCKS_METADATA;
  1930. #ifdef EXTENTS_STATS
  1931. {
  1932. struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
  1933. spin_lock(&sbi->s_ext_stats_lock);
  1934. sbi->s_ext_blocks += ee_len;
  1935. sbi->s_ext_extents++;
  1936. if (ee_len < sbi->s_ext_min)
  1937. sbi->s_ext_min = ee_len;
  1938. if (ee_len > sbi->s_ext_max)
  1939. sbi->s_ext_max = ee_len;
  1940. if (ext_depth(inode) > sbi->s_depth_max)
  1941. sbi->s_depth_max = ext_depth(inode);
  1942. spin_unlock(&sbi->s_ext_stats_lock);
  1943. }
  1944. #endif
  1945. if (from >= le32_to_cpu(ex->ee_block)
  1946. && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
  1947. /* tail removal */
  1948. ext4_lblk_t num;
  1949. ext4_fsblk_t start;
  1950. num = le32_to_cpu(ex->ee_block) + ee_len - from;
  1951. start = ext4_ext_pblock(ex) + ee_len - num;
  1952. ext_debug("free last %u blocks starting %llu\n", num, start);
  1953. ext4_free_blocks(handle, inode, NULL, start, num, flags);
  1954. } else if (from == le32_to_cpu(ex->ee_block)
  1955. && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
  1956. printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
  1957. from, to, le32_to_cpu(ex->ee_block), ee_len);
  1958. } else {
  1959. printk(KERN_INFO "strange request: removal(2) "
  1960. "%u-%u from %u:%u\n",
  1961. from, to, le32_to_cpu(ex->ee_block), ee_len);
  1962. }
  1963. return 0;
  1964. }
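/*
 * Editor's illustration (not part of extents.c): for the tail-removal case
 * above, the block count and starting physical block follow from simple
 * offsets. A hypothetical sketch over already-decoded fields.
 */
#include <stdint.h>

struct demo_tail { uint32_t num; uint64_t start; };

/* Free the tail [from, ee_block + ee_len) of an extent that starts at
 * logical ee_block / physical pblock and is ee_len blocks long. */
static struct demo_tail demo_tail_removal(uint32_t ee_block, uint16_t ee_len,
					  uint64_t pblock, uint32_t from)
{
	struct demo_tail t;
	t.num = ee_block + ee_len - from;    /* number of blocks to free */
	t.start = pblock + ee_len - t.num;   /* first physical block to free */
	return t;
}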
  1965. static int
  1966. ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
  1967. struct ext4_ext_path *path, ext4_lblk_t start)
  1968. {
  1969. int err = 0, correct_index = 0;
  1970. int depth = ext_depth(inode), credits;
  1971. struct ext4_extent_header *eh;
  1972. ext4_lblk_t a, b, block;
  1973. unsigned num;
  1974. ext4_lblk_t ex_ee_block;
  1975. unsigned short ex_ee_len;
  1976. unsigned uninitialized = 0;
  1977. struct ext4_extent *ex;
  1978. /* the header must be checked already in ext4_ext_remove_space() */
  1979. ext_debug("truncate since %u in leaf\n", start);
  1980. if (!path[depth].p_hdr)
  1981. path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
  1982. eh = path[depth].p_hdr;
  1983. if (unlikely(path[depth].p_hdr == NULL)) {
  1984. EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
  1985. return -EIO;
  1986. }
  1987. /* find where to start removing */
  1988. ex = EXT_LAST_EXTENT(eh);
  1989. ex_ee_block = le32_to_cpu(ex->ee_block);
  1990. ex_ee_len = ext4_ext_get_actual_len(ex);
  1991. while (ex >= EXT_FIRST_EXTENT(eh) &&
  1992. ex_ee_block + ex_ee_len > start) {
  1993. if (ext4_ext_is_uninitialized(ex))
  1994. uninitialized = 1;
  1995. else
  1996. uninitialized = 0;
  1997. ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
  1998. uninitialized, ex_ee_len);
  1999. path[depth].p_ext = ex;
  2000. a = ex_ee_block > start ? ex_ee_block : start;
  2001. b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
  2002. ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
  2003. ext_debug(" border %u:%u\n", a, b);
  2004. if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
  2005. block = 0;
  2006. num = 0;
  2007. BUG();
  2008. } else if (a != ex_ee_block) {
  2009. /* remove tail of the extent */
  2010. block = ex_ee_block;
  2011. num = a - block;
  2012. } else if (b != ex_ee_block + ex_ee_len - 1) {
  2013. /* remove head of the extent */
  2014. block = a;
  2015. num = b - a;
  2016. /* there is no "make a hole" API yet */
  2017. BUG();
  2018. } else {
  2019. /* remove whole extent: excellent! */
  2020. block = ex_ee_block;
  2021. num = 0;
  2022. BUG_ON(a != ex_ee_block);
  2023. BUG_ON(b != ex_ee_block + ex_ee_len - 1);
  2024. }
  2025. /*
  2026. * 3 for leaf, sb, and inode plus 2 (bmap and group
  2027. * descriptor) for each block group; assume two block
  2028. * groups plus ex_ee_len/blocks_per_block_group for
  2029. * the worst case
  2030. */
  2031. credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
  2032. if (ex == EXT_FIRST_EXTENT(eh)) {
  2033. correct_index = 1;
  2034. credits += (ext_depth(inode)) + 1;
  2035. }
  2036. credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
  2037. err = ext4_ext_truncate_extend_restart(handle, inode, credits);
  2038. if (err)
  2039. goto out;
  2040. err = ext4_ext_get_access(handle, inode, path + depth);
  2041. if (err)
  2042. goto out;
  2043. err = ext4_remove_blocks(handle, inode, ex, a, b);
  2044. if (err)
  2045. goto out;
  2046. if (num == 0) {
  2047. /* this extent is removed; mark slot entirely unused */
  2048. ext4_ext_store_pblock(ex, 0);
  2049. le16_add_cpu(&eh->eh_entries, -1);
  2050. }
  2051. ex->ee_block = cpu_to_le32(block);
  2052. ex->ee_len = cpu_to_le16(num);
  2053. /*
  2054. * Do not mark uninitialized if all the blocks in the
  2055. * extent have been removed.
  2056. */
  2057. if (uninitialized && num)
  2058. ext4_ext_mark_uninitialized(ex);
  2059. err = ext4_ext_dirty(handle, inode, path + depth);
  2060. if (err)
  2061. goto out;
  2062. ext_debug("new extent: %u:%u:%llu\n", block, num,
  2063. ext4_ext_pblock(ex));
  2064. ex--;
  2065. ex_ee_block = le32_to_cpu(ex->ee_block);
  2066. ex_ee_len = ext4_ext_get_actual_len(ex);
  2067. }
  2068. if (correct_index && eh->eh_entries)
  2069. err = ext4_ext_correct_indexes(handle, inode, path);
  2070. /* if this leaf is free, then we should
  2071. * remove it from index block above */
  2072. if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
  2073. err = ext4_ext_rm_idx(handle, inode, path + depth);
  2074. out:
  2075. return err;
  2076. }
  2077. /*
  2078. * ext4_ext_more_to_rm:
  2079. * returns 1 if current index has to be freed (even partial)
  2080. */
  2081. static int
  2082. ext4_ext_more_to_rm(struct ext4_ext_path *path)
  2083. {
  2084. BUG_ON(path->p_idx == NULL);
  2085. if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
  2086. return 0;
  2087. /*
2088. * if truncation on a deeper level happened, it wasn't partial,
2089. * so we have to consider the current index for truncation
  2090. */
  2091. if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
  2092. return 0;
  2093. return 1;
  2094. }
  2095. static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
  2096. {
  2097. struct super_block *sb = inode->i_sb;
  2098. int depth = ext_depth(inode);
  2099. struct ext4_ext_path *path;
  2100. handle_t *handle;
  2101. int i, err;
  2102. ext_debug("truncate since %u\n", start);
  2103. /* probably first extent we're gonna free will be last in block */
  2104. handle = ext4_journal_start(inode, depth + 1);
  2105. if (IS_ERR(handle))
  2106. return PTR_ERR(handle);
  2107. again:
  2108. ext4_ext_invalidate_cache(inode);
  2109. /*
  2110. * We start scanning from right side, freeing all the blocks
  2111. * after i_size and walking into the tree depth-wise.
  2112. */
  2113. depth = ext_depth(inode);
  2114. path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
  2115. if (path == NULL) {
  2116. ext4_journal_stop(handle);
  2117. return -ENOMEM;
  2118. }
  2119. path[0].p_depth = depth;
  2120. path[0].p_hdr = ext_inode_hdr(inode);
  2121. if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
  2122. err = -EIO;
  2123. goto out;
  2124. }
  2125. i = err = 0;
  2126. while (i >= 0 && err == 0) {
  2127. if (i == depth) {
  2128. /* this is leaf block */
  2129. err = ext4_ext_rm_leaf(handle, inode, path, start);
  2130. /* root level has p_bh == NULL, brelse() eats this */
  2131. brelse(path[i].p_bh);
  2132. path[i].p_bh = NULL;
  2133. i--;
  2134. continue;
  2135. }
  2136. /* this is index block */
  2137. if (!path[i].p_hdr) {
  2138. ext_debug("initialize header\n");
  2139. path[i].p_hdr = ext_block_hdr(path[i].p_bh);
  2140. }
  2141. if (!path[i].p_idx) {
  2142. /* this level hasn't been touched yet */
  2143. path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
  2144. path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
  2145. ext_debug("init index ptr: hdr 0x%p, num %d\n",
  2146. path[i].p_hdr,
  2147. le16_to_cpu(path[i].p_hdr->eh_entries));
  2148. } else {
  2149. /* we were already here, see at next index */
  2150. path[i].p_idx--;
  2151. }
  2152. ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
  2153. i, EXT_FIRST_INDEX(path[i].p_hdr),
  2154. path[i].p_idx);
  2155. if (ext4_ext_more_to_rm(path + i)) {
  2156. struct buffer_head *bh;
  2157. /* go to the next level */
  2158. ext_debug("move to level %d (block %llu)\n",
  2159. i + 1, ext4_idx_pblock(path[i].p_idx));
  2160. memset(path + i + 1, 0, sizeof(*path));
  2161. bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
  2162. if (!bh) {
  2163. /* should we reset i_size? */
  2164. err = -EIO;
  2165. break;
  2166. }
  2167. if (WARN_ON(i + 1 > depth)) {
  2168. err = -EIO;
  2169. break;
  2170. }
  2171. if (ext4_ext_check(inode, ext_block_hdr(bh),
  2172. depth - i - 1)) {
  2173. err = -EIO;
  2174. break;
  2175. }
  2176. path[i + 1].p_bh = bh;
  2177. /* save actual number of indexes since this
  2178. * number is changed at the next iteration */
  2179. path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
  2180. i++;
  2181. } else {
  2182. /* we finished processing this index, go up */
  2183. if (path[i].p_hdr->eh_entries == 0 && i > 0) {
  2184. /* index is empty, remove it;
2185. * handle must already be prepared by the
2186. * leaf truncation code */
  2187. err = ext4_ext_rm_idx(handle, inode, path + i);
  2188. }
  2189. /* root level has p_bh == NULL, brelse() eats this */
  2190. brelse(path[i].p_bh);
  2191. path[i].p_bh = NULL;
  2192. i--;
  2193. ext_debug("return to level %d\n", i);
  2194. }
  2195. }
  2196. /* TODO: flexible tree reduction should be here */
  2197. if (path->p_hdr->eh_entries == 0) {
  2198. /*
  2199. * truncate to zero freed all the tree,
  2200. * so we need to correct eh_depth
  2201. */
  2202. err = ext4_ext_get_access(handle, inode, path);
  2203. if (err == 0) {
  2204. ext_inode_hdr(inode)->eh_depth = 0;
  2205. ext_inode_hdr(inode)->eh_max =
  2206. cpu_to_le16(ext4_ext_space_root(inode, 0));
  2207. err = ext4_ext_dirty(handle, inode, path);
  2208. }
  2209. }
  2210. out:
  2211. ext4_ext_drop_refs(path);
  2212. kfree(path);
  2213. if (err == -EAGAIN)
  2214. goto again;
  2215. ext4_journal_stop(handle);
  2216. return err;
  2217. }
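/*
 * Editor's illustration (not part of extents.c): the truncate walk above is
 * an iterative depth-first traversal that uses the path array as an
 * explicit stack (the kernel walks right-to-left; this sketch walks
 * left-to-right, which is immaterial to the control flow). demo_dfs is
 * hypothetical and supports depth < 8.
 */
#include <stdio.h>

static void demo_dfs(int depth, int fanout)
{
	int slot[8] = {0};          /* per-level cursor: the explicit stack */
	int i = 0;                  /* current level, 0 == root */
	while (i >= 0) {
		if (i == depth) {   /* leaf level: process it, then pop */
			printf("leaf reached\n");
			i--;
			continue;
		}
		if (slot[i] < fanout) {  /* push: descend into next child */
			slot[i]++;
			slot[i + 1] = 0;
			i++;
		} else {                 /* pop: done with this index block */
			i--;
		}
	}
}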
  2218. /*
  2219. * called at mount time
  2220. */
  2221. void ext4_ext_init(struct super_block *sb)
  2222. {
  2223. /*
  2224. * possible initialization would be here
  2225. */
  2226. if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
  2227. #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
  2228. printk(KERN_INFO "EXT4-fs: file extents enabled");
  2229. #ifdef AGGRESSIVE_TEST
  2230. printk(", aggressive tests");
  2231. #endif
  2232. #ifdef CHECK_BINSEARCH
  2233. printk(", check binsearch");
  2234. #endif
  2235. #ifdef EXTENTS_STATS
  2236. printk(", stats");
  2237. #endif
  2238. printk("\n");
  2239. #endif
  2240. #ifdef EXTENTS_STATS
  2241. spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
  2242. EXT4_SB(sb)->s_ext_min = 1 << 30;
  2243. EXT4_SB(sb)->s_ext_max = 0;
  2244. #endif
  2245. }
  2246. }
  2247. /*
  2248. * called at umount time
  2249. */
  2250. void ext4_ext_release(struct super_block *sb)
  2251. {
  2252. if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
  2253. return;
  2254. #ifdef EXTENTS_STATS
  2255. if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
  2256. struct ext4_sb_info *sbi = EXT4_SB(sb);
  2257. printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
  2258. sbi->s_ext_blocks, sbi->s_ext_extents,
  2259. sbi->s_ext_blocks / sbi->s_ext_extents);
  2260. printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
  2261. sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
  2262. }
  2263. #endif
  2264. }
  2265. /* FIXME!! we need to try to merge to left or right after zero-out */
  2266. static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
  2267. {
  2268. ext4_fsblk_t ee_pblock;
  2269. unsigned int ee_len;
  2270. int ret;
  2271. ee_len = ext4_ext_get_actual_len(ex);
  2272. ee_pblock = ext4_ext_pblock(ex);
  2273. ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
  2274. if (ret > 0)
  2275. ret = 0;
  2276. return ret;
  2277. }
  2278. /*
  2279. * used by extent splitting.
  2280. */
  2281. #define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \
  2282. due to ENOSPC */
  2283. #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
  2284. #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
  2285. /*
  2286. * ext4_split_extent_at() splits an extent at given block.
  2287. *
  2288. * @handle: the journal handle
  2289. * @inode: the file inode
  2290. * @path: the path to the extent
2291. * @split: the logical block where the extent is split.
2292. * @split_flag: indicates whether the extent could be zeroed out if the split
2293. * fails, and the states (init or uninit) of the new extents.
2294. * @flags: flags used to insert the new extent into the extent tree.
2295. *
2296. *
2297. * Splits extent [a, b] into two extents [a, @split) and [@split, b], the states
2298. * of which are determined by split_flag.
2299. *
2300. * There are two cases:
2301. * a> the extent is split into two extents.
2302. * b> no split is needed, and the extent is just marked.
  2303. *
  2304. * return 0 on success.
  2305. */
  2306. static int ext4_split_extent_at(handle_t *handle,
  2307. struct inode *inode,
  2308. struct ext4_ext_path *path,
  2309. ext4_lblk_t split,
  2310. int split_flag,
  2311. int flags)
  2312. {
  2313. ext4_fsblk_t newblock;
  2314. ext4_lblk_t ee_block;
  2315. struct ext4_extent *ex, newex, orig_ex;
  2316. struct ext4_extent *ex2 = NULL;
  2317. unsigned int ee_len, depth;
  2318. int err = 0;
  2319. ext_debug("ext4_split_extents_at: inode %lu, logical"
  2320. "block %llu\n", inode->i_ino, (unsigned long long)split);
  2321. ext4_ext_show_leaf(inode, path);
  2322. depth = ext_depth(inode);
  2323. ex = path[depth].p_ext;
  2324. ee_block = le32_to_cpu(ex->ee_block);
  2325. ee_len = ext4_ext_get_actual_len(ex);
  2326. newblock = split - ee_block + ext4_ext_pblock(ex);
  2327. BUG_ON(split < ee_block || split >= (ee_block + ee_len));
  2328. err = ext4_ext_get_access(handle, inode, path + depth);
  2329. if (err)
  2330. goto out;
  2331. if (split == ee_block) {
  2332. /*
  2333. * case b: block @split is the block that the extent begins with
  2334. * then we just change the state of the extent, and splitting
  2335. * is not needed.
  2336. */
  2337. if (split_flag & EXT4_EXT_MARK_UNINIT2)
  2338. ext4_ext_mark_uninitialized(ex);
  2339. else
  2340. ext4_ext_mark_initialized(ex);
  2341. if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
  2342. ext4_ext_try_to_merge(inode, path, ex);
  2343. err = ext4_ext_dirty(handle, inode, path + depth);
  2344. goto out;
  2345. }
  2346. /* case a */
  2347. memcpy(&orig_ex, ex, sizeof(orig_ex));
  2348. ex->ee_len = cpu_to_le16(split - ee_block);
  2349. if (split_flag & EXT4_EXT_MARK_UNINIT1)
  2350. ext4_ext_mark_uninitialized(ex);
  2351. /*
  2352. * path may lead to new leaf, not to original leaf any more
  2353. * after ext4_ext_insert_extent() returns,
  2354. */
  2355. err = ext4_ext_dirty(handle, inode, path + depth);
  2356. if (err)
  2357. goto fix_extent_len;
  2358. ex2 = &newex;
  2359. ex2->ee_block = cpu_to_le32(split);
  2360. ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
  2361. ext4_ext_store_pblock(ex2, newblock);
  2362. if (split_flag & EXT4_EXT_MARK_UNINIT2)
  2363. ext4_ext_mark_uninitialized(ex2);
  2364. err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
  2365. if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
  2366. err = ext4_ext_zeroout(inode, &orig_ex);
  2367. if (err)
  2368. goto fix_extent_len;
  2369. /* update the extent length and mark as initialized */
2370. ex->ee_len = cpu_to_le16(ee_len);
  2371. ext4_ext_try_to_merge(inode, path, ex);
  2372. err = ext4_ext_dirty(handle, inode, path + depth);
  2373. goto out;
  2374. } else if (err)
  2375. goto fix_extent_len;
  2376. out:
  2377. ext4_ext_show_leaf(inode, path);
  2378. return err;
  2379. fix_extent_len:
  2380. ex->ee_len = orig_ex.ee_len;
  2381. ext4_ext_dirty(handle, inode, path + depth);
  2382. return err;
  2383. }
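/*
 * Editor's illustration (not part of extents.c): splitting [a, b] at @split
 * (case a above, so @split lies strictly inside the extent) is pure offset
 * arithmetic; the second half inherits the physical blocks at the matching
 * offset. Hypothetical demo types; no on-disk encoding.
 */
#include <stdint.h>
#include <assert.h>

struct demo_ext { uint32_t lblock; uint64_t pblock; uint16_t len; };

static void demo_split_at(const struct demo_ext *ex, uint32_t split,
			  struct demo_ext *left, struct demo_ext *right)
{
	assert(split > ex->lblock && split < ex->lblock + ex->len);
	left->lblock = ex->lblock;
	left->pblock = ex->pblock;
	left->len = (uint16_t)(split - ex->lblock);
	right->lblock = split;
	right->pblock = ex->pblock + (split - ex->lblock);
	right->len = (uint16_t)(ex->len - (split - ex->lblock));
}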
  2384. /*
2385. * ext4_split_extent() splits an extent and marks the extent covered
2386. * by @map as split_flag indicates.
2387. *
2388. * It may result in splitting the extent into multiple extents (up to three).
2389. * There are three possibilities:
2390. * a> There is no split required
2391. * b> Splits into two extents: Split is happening at either end of the extent
2392. * c> Splits into three extents: Someone is splitting in the middle of the extent
2393. *
  2394. */
  2395. static int ext4_split_extent(handle_t *handle,
  2396. struct inode *inode,
  2397. struct ext4_ext_path *path,
  2398. struct ext4_map_blocks *map,
  2399. int split_flag,
  2400. int flags)
  2401. {
  2402. ext4_lblk_t ee_block;
  2403. struct ext4_extent *ex;
  2404. unsigned int ee_len, depth;
  2405. int err = 0;
  2406. int uninitialized;
  2407. int split_flag1, flags1;
  2408. depth = ext_depth(inode);
  2409. ex = path[depth].p_ext;
  2410. ee_block = le32_to_cpu(ex->ee_block);
  2411. ee_len = ext4_ext_get_actual_len(ex);
  2412. uninitialized = ext4_ext_is_uninitialized(ex);
  2413. if (map->m_lblk + map->m_len < ee_block + ee_len) {
  2414. split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
  2415. EXT4_EXT_MAY_ZEROOUT : 0;
  2416. flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
  2417. if (uninitialized)
  2418. split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
  2419. EXT4_EXT_MARK_UNINIT2;
  2420. err = ext4_split_extent_at(handle, inode, path,
  2421. map->m_lblk + map->m_len, split_flag1, flags1);
  2422. if (err)
  2423. goto out;
  2424. }
  2425. ext4_ext_drop_refs(path);
  2426. path = ext4_ext_find_extent(inode, map->m_lblk, path);
  2427. if (IS_ERR(path))
  2428. return PTR_ERR(path);
  2429. if (map->m_lblk >= ee_block) {
  2430. split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
  2431. EXT4_EXT_MAY_ZEROOUT : 0;
  2432. if (uninitialized)
  2433. split_flag1 |= EXT4_EXT_MARK_UNINIT1;
  2434. if (split_flag & EXT4_EXT_MARK_UNINIT2)
  2435. split_flag1 |= EXT4_EXT_MARK_UNINIT2;
  2436. err = ext4_split_extent_at(handle, inode, path,
  2437. map->m_lblk, split_flag1, flags);
  2438. if (err)
  2439. goto out;
  2440. }
  2441. ext4_ext_show_leaf(inode, path);
  2442. out:
  2443. return err ? err : map->m_len;
  2444. }
  2445. #define EXT4_EXT_ZERO_LEN 7
  2446. /*
  2447. * This function is called by ext4_ext_map_blocks() if someone tries to write
  2448. * to an uninitialized extent. It may result in splitting the uninitialized
  2449. * extent into multiple extents (up to three - one initialized and two
  2450. * uninitialized).
  2451. * There are three possibilities:
  2452. * a> There is no split required: Entire extent should be initialized
2453. * b> Splits into two extents: Write is happening at either end of the extent
2454. * c> Splits into three extents: Someone is writing in the middle of the extent
  2455. */
  2456. static int ext4_ext_convert_to_initialized(handle_t *handle,
  2457. struct inode *inode,
  2458. struct ext4_map_blocks *map,
  2459. struct ext4_ext_path *path)
  2460. {
  2461. struct ext4_map_blocks split_map;
  2462. struct ext4_extent zero_ex;
  2463. struct ext4_extent *ex;
  2464. ext4_lblk_t ee_block, eof_block;
  2465. unsigned int allocated, ee_len, depth;
  2466. int err = 0;
  2467. int split_flag = 0;
  2468. ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
  2469. "block %llu, max_blocks %u\n", inode->i_ino,
  2470. (unsigned long long)map->m_lblk, map->m_len);
  2471. eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
  2472. inode->i_sb->s_blocksize_bits;
  2473. if (eof_block < map->m_lblk + map->m_len)
  2474. eof_block = map->m_lblk + map->m_len;
  2475. depth = ext_depth(inode);
  2476. ex = path[depth].p_ext;
  2477. ee_block = le32_to_cpu(ex->ee_block);
  2478. ee_len = ext4_ext_get_actual_len(ex);
  2479. allocated = ee_len - (map->m_lblk - ee_block);
  2480. WARN_ON(map->m_lblk < ee_block);
  2481. /*
  2482. * It is safe to convert extent to initialized via explicit
2483. * zeroout only if the extent is fully inside i_size or new_size.
  2484. */
  2485. split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
2486. /* If the extent is at most 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
  2487. if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
  2488. (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
  2489. err = ext4_ext_zeroout(inode, ex);
  2490. if (err)
  2491. goto out;
  2492. err = ext4_ext_get_access(handle, inode, path + depth);
  2493. if (err)
  2494. goto out;
  2495. ext4_ext_mark_initialized(ex);
  2496. ext4_ext_try_to_merge(inode, path, ex);
  2497. err = ext4_ext_dirty(handle, inode, path + depth);
  2498. goto out;
  2499. }
  2500. /*
  2501. * four cases:
  2502. * 1. split the extent into three extents.
  2503. * 2. split the extent into two extents, zeroout the first half.
  2504. * 3. split the extent into two extents, zeroout the second half.
2505. * 4. split the extent into two extents without zeroout.
  2506. */
  2507. split_map.m_lblk = map->m_lblk;
  2508. split_map.m_len = map->m_len;
  2509. if (allocated > map->m_len) {
  2510. if (allocated <= EXT4_EXT_ZERO_LEN &&
  2511. (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
  2512. /* case 3 */
  2513. zero_ex.ee_block =
  2514. cpu_to_le32(map->m_lblk);
  2515. zero_ex.ee_len = cpu_to_le16(allocated);
  2516. ext4_ext_store_pblock(&zero_ex,
  2517. ext4_ext_pblock(ex) + map->m_lblk - ee_block);
  2518. err = ext4_ext_zeroout(inode, &zero_ex);
  2519. if (err)
  2520. goto out;
  2521. split_map.m_lblk = map->m_lblk;
  2522. split_map.m_len = allocated;
  2523. } else if ((map->m_lblk - ee_block + map->m_len <
  2524. EXT4_EXT_ZERO_LEN) &&
  2525. (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
  2526. /* case 2 */
  2527. if (map->m_lblk != ee_block) {
  2528. zero_ex.ee_block = ex->ee_block;
  2529. zero_ex.ee_len = cpu_to_le16(map->m_lblk -
  2530. ee_block);
  2531. ext4_ext_store_pblock(&zero_ex,
  2532. ext4_ext_pblock(ex));
  2533. err = ext4_ext_zeroout(inode, &zero_ex);
  2534. if (err)
  2535. goto out;
  2536. }
  2537. split_map.m_lblk = ee_block;
  2538. split_map.m_len = map->m_lblk - ee_block + map->m_len;
  2539. allocated = map->m_len;
  2540. }
  2541. }
  2542. allocated = ext4_split_extent(handle, inode, path,
  2543. &split_map, split_flag, 0);
  2544. if (allocated < 0)
  2545. err = allocated;
  2546. out:
  2547. return err ? err : allocated;
  2548. }
  2549. /*
  2550. * This function is called by ext4_ext_map_blocks() from
2551. * ext4_get_blocks_dio_write() when DIO is used to write
2552. * to an uninitialized extent.
2553. *
2554. * Writing to an uninitialized extent may result in splitting the uninitialized
2555. * extent into multiple initialized/uninitialized extents (up to three).
2556. * There are three possibilities:
2557. * a> There is no split required: Entire extent should be uninitialized
2558. * b> Splits into two extents: Write is happening at either end of the extent
2559. * c> Splits into three extents: Someone is writing in the middle of the extent
2560. *
2561. * One or more index blocks may be needed if the extent tree grows after
2562. * the uninitialized extent is split. To prevent ENOSPC from occurring when the
2563. * IO completes, we need to split the uninitialized extent before the DIO is
2564. * submitted. The uninitialized extent handled at this time will be split
2565. * into (at most) three uninitialized extents. After the IO completes, the part
2566. * being filled will be converted to initialized by the end_io callback function
2567. * via ext4_convert_unwritten_extents().
2568. *
2569. * Returns the size of the uninitialized extent to be written on success.
  2570. */
  2571. static int ext4_split_unwritten_extents(handle_t *handle,
  2572. struct inode *inode,
  2573. struct ext4_map_blocks *map,
  2574. struct ext4_ext_path *path,
  2575. int flags)
  2576. {
  2577. ext4_lblk_t eof_block;
  2578. ext4_lblk_t ee_block;
  2579. struct ext4_extent *ex;
  2580. unsigned int ee_len;
  2581. int split_flag = 0, depth;
  2582. ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
  2583. "block %llu, max_blocks %u\n", inode->i_ino,
  2584. (unsigned long long)map->m_lblk, map->m_len);
  2585. eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
  2586. inode->i_sb->s_blocksize_bits;
  2587. if (eof_block < map->m_lblk + map->m_len)
  2588. eof_block = map->m_lblk + map->m_len;
  2589. /*
  2590. * It is safe to convert extent to initialized via explicit
2591. * zeroout only if the extent is fully inside i_size or new_size.
  2592. */
  2593. depth = ext_depth(inode);
  2594. ex = path[depth].p_ext;
  2595. ee_block = le32_to_cpu(ex->ee_block);
  2596. ee_len = ext4_ext_get_actual_len(ex);
  2597. split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
  2598. split_flag |= EXT4_EXT_MARK_UNINIT2;
  2599. flags |= EXT4_GET_BLOCKS_PRE_IO;
  2600. return ext4_split_extent(handle, inode, path, map, split_flag, flags);
  2601. }
  2602. static int ext4_convert_unwritten_extents_endio(handle_t *handle,
  2603. struct inode *inode,
  2604. struct ext4_ext_path *path)
  2605. {
  2606. struct ext4_extent *ex;
  2607. struct ext4_extent_header *eh;
  2608. int depth;
  2609. int err = 0;
  2610. depth = ext_depth(inode);
  2611. eh = path[depth].p_hdr;
  2612. ex = path[depth].p_ext;
  2613. ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
  2614. "block %llu, max_blocks %u\n", inode->i_ino,
  2615. (unsigned long long)le32_to_cpu(ex->ee_block),
  2616. ext4_ext_get_actual_len(ex));
  2617. err = ext4_ext_get_access(handle, inode, path + depth);
  2618. if (err)
  2619. goto out;
  2620. /* first mark the extent as initialized */
  2621. ext4_ext_mark_initialized(ex);
  2622. /* note: ext4_ext_correct_indexes() isn't needed here because
  2623. * borders are not changed
  2624. */
  2625. ext4_ext_try_to_merge(inode, path, ex);
  2626. /* Mark modified extent as dirty */
  2627. err = ext4_ext_dirty(handle, inode, path + depth);
  2628. out:
  2629. ext4_ext_show_leaf(inode, path);
  2630. return err;
  2631. }
  2632. static void unmap_underlying_metadata_blocks(struct block_device *bdev,
  2633. sector_t block, int count)
  2634. {
  2635. int i;
  2636. for (i = 0; i < count; i++)
  2637. unmap_underlying_metadata(bdev, block + i);
  2638. }
  2639. /*
  2640. * Handle EOFBLOCKS_FL flag, clearing it if necessary
  2641. */
  2642. static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
  2643. ext4_lblk_t lblk,
  2644. struct ext4_ext_path *path,
  2645. unsigned int len)
  2646. {
  2647. int i, depth;
  2648. struct ext4_extent_header *eh;
  2649. struct ext4_extent *last_ex;
  2650. if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
  2651. return 0;
  2652. depth = ext_depth(inode);
  2653. eh = path[depth].p_hdr;
  2654. if (unlikely(!eh->eh_entries)) {
  2655. EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
  2656. "EOFBLOCKS_FL set");
  2657. return -EIO;
  2658. }
  2659. last_ex = EXT_LAST_EXTENT(eh);
  2660. /*
  2661. * We should clear the EOFBLOCKS_FL flag if we are writing the
  2662. * last block in the last extent in the file. We test this by
  2663. * first checking to see if the caller to
  2664. * ext4_ext_get_blocks() was interested in the last block (or
  2665. * a block beyond the last block) in the current extent. If
  2666. * this turns out to be false, we can bail out from this
  2667. * function immediately.
  2668. */
  2669. if (lblk + len < le32_to_cpu(last_ex->ee_block) +
  2670. ext4_ext_get_actual_len(last_ex))
  2671. return 0;
  2672. /*
  2673. * If the caller does appear to be planning to write at or
  2674. * beyond the end of the current extent, we then test to see
  2675. * if the current extent is the last extent in the file, by
  2676. * checking to make sure it was reached via the rightmost node
  2677. * at each level of the tree.
  2678. */
  2679. for (i = depth-1; i >= 0; i--)
  2680. if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
  2681. return 0;
  2682. ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
  2683. return ext4_mark_inode_dirty(handle, inode);
  2684. }
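/*
 * Editor's illustration (not part of extents.c): the loop above verifies
 * the extent was reached via the rightmost index at every level. A sketch
 * over a simplified path model (the demo_* names are hypothetical).
 */
struct demo_level { int cur; int last; };  /* current vs last index slot */

static int demo_on_rightmost_path(const struct demo_level *lvl, int depth)
{
	int i;
	for (i = depth - 1; i >= 0; i--)
		if (lvl[i].cur != lvl[i].last)
			return 0;   /* not the last extent in the file */
	return 1;
}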
  2685. static int
  2686. ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
  2687. struct ext4_map_blocks *map,
  2688. struct ext4_ext_path *path, int flags,
  2689. unsigned int allocated, ext4_fsblk_t newblock)
  2690. {
  2691. int ret = 0;
  2692. int err = 0;
  2693. ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
  2694. ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
  2695. "block %llu, max_blocks %u, flags %d, allocated %u",
  2696. inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
  2697. flags, allocated);
  2698. ext4_ext_show_leaf(inode, path);
  2699. /* get_block() before submit the IO, split the extent */
  2700. if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
  2701. ret = ext4_split_unwritten_extents(handle, inode, map,
  2702. path, flags);
  2703. /*
2704. * Flag the inode (non-aio case) or end_io struct (aio case)
2705. * that this IO needs conversion to written when the IO is
2706. * completed
  2707. */
  2708. if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
  2709. io->flag = EXT4_IO_END_UNWRITTEN;
  2710. atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
  2711. } else
  2712. ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
  2713. if (ext4_should_dioread_nolock(inode))
  2714. map->m_flags |= EXT4_MAP_UNINIT;
  2715. goto out;
  2716. }
  2717. /* IO end_io complete, convert the filled extent to written */
  2718. if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
  2719. ret = ext4_convert_unwritten_extents_endio(handle, inode,
  2720. path);
  2721. if (ret >= 0) {
  2722. ext4_update_inode_fsync_trans(handle, inode, 1);
  2723. err = check_eofblocks_fl(handle, inode, map->m_lblk,
  2724. path, map->m_len);
  2725. } else
  2726. err = ret;
  2727. goto out2;
  2728. }
  2729. /* buffered IO case */
  2730. /*
  2731. * repeat fallocate creation request
  2732. * we already have an unwritten extent
  2733. */
  2734. if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
  2735. goto map_out;
  2736. /* buffered READ or buffered write_begin() lookup */
  2737. if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
  2738. /*
  2739. * We have blocks reserved already. We
  2740. * return allocated blocks so that delalloc
  2741. * won't do block reservation for us. But
  2742. * the buffer head will be unmapped so that
  2743. * a read from the block returns 0s.
  2744. */
  2745. map->m_flags |= EXT4_MAP_UNWRITTEN;
  2746. goto out1;
  2747. }
  2748. /* buffered write, writepage time, convert*/
  2749. ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
  2750. if (ret >= 0) {
  2751. ext4_update_inode_fsync_trans(handle, inode, 1);
  2752. err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
  2753. map->m_len);
  2754. if (err < 0)
  2755. goto out2;
  2756. }
  2757. out:
  2758. if (ret <= 0) {
  2759. err = ret;
  2760. goto out2;
  2761. } else
  2762. allocated = ret;
  2763. map->m_flags |= EXT4_MAP_NEW;
  2764. /*
  2765. * if we allocated more blocks than requested
2766. * we need to make sure we unmap the extra blocks
2767. * allocated. The actually needed blocks will get
  2768. * unmapped later when we find the buffer_head marked
  2769. * new.
  2770. */
  2771. if (allocated > map->m_len) {
  2772. unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
  2773. newblock + map->m_len,
  2774. allocated - map->m_len);
  2775. allocated = map->m_len;
  2776. }
  2777. /*
2778. * If we have done fallocate at an offset that is already
2779. * delayed allocated, we would have block reservation
2780. * and quota reservation done in the delayed write path.
2781. * But fallocate would have already updated the quota and block
2782. * count for this offset. So cancel these reservations.
  2783. */
  2784. if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
  2785. ext4_da_update_reserve_space(inode, allocated, 0);
  2786. map_out:
  2787. map->m_flags |= EXT4_MAP_MAPPED;
  2788. out1:
  2789. if (allocated > map->m_len)
  2790. allocated = map->m_len;
  2791. ext4_ext_show_leaf(inode, path);
  2792. map->m_pblk = newblock;
  2793. map->m_len = allocated;
  2794. out2:
  2795. if (path) {
  2796. ext4_ext_drop_refs(path);
  2797. kfree(path);
  2798. }
  2799. return err ? err : allocated;
  2800. }

/*
 * Block allocation/map/preallocation routine for extents-based files
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks
 * (i.e., create is zero).  Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 *
 * return > 0, number of blocks already mapped/allocated
 *		if create == 0 and these are pre-allocated blocks
 *			buffer head is unmapped
 *		otherwise blocks are mapped
 *
 * return = 0, if plain lookup failed (blocks have not been allocated)
 *		buffer head is unmapped
 *
 * return < 0, error case.
 */
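/*
 * A minimal caller sketch (illustrative only, not part of the original
 * file): map the first eight logical blocks of an inode, allocating them
 * if they do not exist yet.
 *
 *	struct ext4_map_blocks map = { .m_lblk = 0, .m_len = 8 };
 *	int ret = ext4_ext_map_blocks(handle, inode, &map,
 *				      EXT4_GET_BLOCKS_CREATE);
 *
 * On success, ret is the number of blocks mapped and map.m_pblk holds
 * the first physical block of the range.
 */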
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map, int flags)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent newex, *ex;
	ext4_fsblk_t newblock = 0;
	int err = 0, depth, ret;
	unsigned int allocated = 0;
	struct ext4_allocation_request ar;
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

	ext_debug("blocks %u/%u requested for inode %lu\n",
		  map->m_lblk, map->m_len, inode->i_ino);
	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);

	/* check in cache */
	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
		if (!newex.ee_start_lo && !newex.ee_start_hi) {
			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
				/*
				 * the block isn't allocated yet and the
				 * caller doesn't want to allocate it
				 */
				goto out2;
			}
			/* we should allocate the requested block */
		} else {
			/* the block is already allocated */
			newblock = map->m_lblk
				   - le32_to_cpu(newex.ee_block)
				   + ext4_ext_pblock(&newex);
			/* number of remaining blocks in the extent */
			allocated = ext4_ext_get_actual_len(&newex) -
				(map->m_lblk - le32_to_cpu(newex.ee_block));
			goto out;
		}
	}

	/* find extent for this block */
	path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
	 * A consistent leaf must not be empty; this situation is possible,
	 * though, _during_ tree modification, which is why the assert can't
	 * be put in ext4_ext_find_extent()
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode, "bad extent address "
				 "lblock: %lu, depth: %d pblock %lld",
				 (unsigned long) map->m_lblk, depth,
				 path[depth].p_block);
		err = -EIO;
		goto out2;
	}

	ex = path[depth].p_ext;
	if (ex) {
		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
		unsigned short ee_len;

		/*
		 * Uninitialized extents are treated as holes, except that
		 * we split out initialized portions during a write.
		 */
		ee_len = ext4_ext_get_actual_len(ex);
		/* if the found extent covers the block, simply return it */
		if (in_range(map->m_lblk, ee_block, ee_len)) {
			newblock = map->m_lblk - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (map->m_lblk - ee_block);
			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
				  ee_block, ee_len, newblock);

			/* Do not put an uninitialized extent in the cache */
			if (!ext4_ext_is_uninitialized(ex)) {
				ext4_ext_put_in_cache(inode, ee_block,
						      ee_len, ee_start);
				goto out;
			}
			ret = ext4_ext_handle_uninitialized_extents(handle,
					inode, map, path, flags, allocated,
					newblock);
			return ret;
		}
	}

	/*
	 * The requested block isn't allocated yet;
	 * we can't try to create blocks if the create flag is zero
	 */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * put the just-found gap into the cache to speed up
		 * subsequent requests
		 */
		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
		goto out2;
	}

	/*
	 * Okay, we need to do block allocation.
	 */

	/* find neighbouring allocated blocks */
	ar.lleft = map->m_lblk;
	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
	if (err)
		goto out2;
	ar.lright = map->m_lblk;
	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
	if (err)
		goto out2;

	/*
	 * See if the request is beyond the maximum number of blocks we can
	 * have in a single extent.  For an initialized extent this limit is
	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
	 * EXT_UNINIT_MAX_LEN.
	 */
	if (map->m_len > EXT_INIT_MAX_LEN &&
	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		map->m_len = EXT_INIT_MAX_LEN;
	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		map->m_len = EXT_UNINIT_MAX_LEN;
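
	/*
	 * For reference: EXT_INIT_MAX_LEN is 1 << 15 (32768 blocks).  The
	 * most significant bit of the 16-bit ee_len field marks an extent
	 * as uninitialized, so an uninitialized extent can be at most one
	 * block shorter: EXT_UNINIT_MAX_LEN == 32767.
	 */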

	/* Check if we can really insert an (m_lblk)::(m_lblk + m_len) extent */
	newex.ee_block = cpu_to_le32(map->m_lblk);
	newex.ee_len = cpu_to_le16(map->m_len);
	err = ext4_ext_check_overlap(inode, &newex, path);
	if (err)
		allocated = ext4_ext_get_actual_len(&newex);
	else
		allocated = map->m_len;

	/* allocate new block */
	ar.inode = inode;
	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
	ar.logical = map->m_lblk;
	ar.len = allocated;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	else
		/* disable in-core preallocation for non-regular files */
		ar.flags = 0;
	newblock = ext4_mb_new_blocks(handle, &ar, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
		  ar.goal, newblock, allocated);

	/* try to insert the new extent into the found leaf and return */
	ext4_ext_store_pblock(&newex, newblock);
	newex.ee_len = cpu_to_le16(ar.len);
	/* Mark uninitialized */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
		ext4_ext_mark_uninitialized(&newex);
		/*
		 * An io_end structure is created for every async direct IO
		 * write to an uninitialized extent.  To avoid unnecessary
		 * conversions, flag here only the IO that really needs the
		 * conversion.  For the non-async direct IO case, set the
		 * inode state so that the conversion is performed when the
		 * IO is done.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
			if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
				io->flag = EXT4_IO_END_UNWRITTEN;
				atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
			} else
				ext4_set_inode_state(inode,
						     EXT4_STATE_DIO_UNWRITTEN);
		}
		if (ext4_should_dioread_nolock(inode))
			map->m_flags |= EXT4_MAP_UNINIT;
	}

	err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
	if (err)
		goto out2;

	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err) {
		/* free the data blocks we just allocated */
		/* not a good idea to call discard here directly,
		 * but otherwise we'd need to call it every free() */
		ext4_discard_preallocations(inode);
		ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
				 ext4_ext_get_actual_len(&newex), 0);
		goto out2;
	}

	/* the previous routine could have used the block we allocated */
	newblock = ext4_ext_pblock(&newex);
	allocated = ext4_ext_get_actual_len(&newex);
	if (allocated > map->m_len)
		allocated = map->m_len;
	map->m_flags |= EXT4_MAP_NEW;

	/*
	 * Update reserved blocks/metadata blocks after a successful block
	 * allocation which had been deferred till now.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_da_update_reserve_space(inode, allocated, 1);

	/*
	 * Cache the extent and update the transaction to commit on
	 * fdatasync only when it is _not_ an uninitialized extent.
	 */
	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
		ext4_update_inode_fsync_trans(handle, inode, 1);
	} else
		ext4_update_inode_fsync_trans(handle, inode, 0);
out:
	if (allocated > map->m_len)
		allocated = map->m_len;
	ext4_ext_show_leaf(inode, path);
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = newblock;
	map->m_len = allocated;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
		newblock, map->m_len, err ? err : allocated);
	return err ? err : allocated;
}

void ext4_ext_truncate(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t last_block;
	handle_t *handle;
	int err = 0;

	/*
	 * finish any pending end_io work so we won't run the risk of
	 * converting any truncated blocks to initialized later
	 */
	ext4_flush_completed_IO(inode);

	/*
	 * probably the first extent we're going to free will be the last
	 * one in its block
	 */
	err = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle))
		return;

	if (inode->i_size & (sb->s_blocksize - 1))
		ext4_block_truncate_page(handle, mapping, inode->i_size);

	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_invalidate_cache(inode);

	ext4_discard_preallocations(inode);

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */

	/* we have to know where to truncate from in the crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);
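	/*
	 * Worked example: with 4 KB blocks and i_size = 10000 bytes,
	 * last_block = (10000 + 4095) >> 12 = 3, so blocks 0-2 (which
	 * still hold data up to i_size) are kept and removal starts at
	 * logical block 3.
	 */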
	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous.
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	up_write(&EXT4_I(inode)->i_data_sem);

out_stop:
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
}

static void ext4_falloc_update_inode(struct inode *inode,
				int mode, loff_t new_size, int update_ctime)
{
	struct timespec now;

	if (update_ctime) {
		now = current_fs_time(inode->i_sb);
		if (!timespec_equal(&inode->i_ctime, &now))
			inode->i_ctime = now;
	}
	/*
	 * Update only when preallocation was requested beyond
	 * the file size.
	 */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (new_size > i_size_read(inode))
			i_size_write(inode, new_size);
		if (new_size > EXT4_I(inode)->i_disksize)
			ext4_update_i_disksize(inode, new_size);
	} else {
		/*
		 * Mark that we allocate beyond EOF so the subsequent truncate
		 * can proceed even if the new size is the same as i_size.
		 */
		if (new_size > i_size_read(inode))
			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	}
}

/*
 * Preallocate space for a file.  This implements ext4's fallocate file
 * operation, which gets called from the sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems which do not support the fallocate() system
 * call).
 */
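/*
 * A minimal userspace sketch (illustrative only) of how this path is
 * reached:
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20);
 *
 * preallocates 16 MB at the start of the file as uninitialized extents
 * without changing i_size.
 */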
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	handle_t *handle;
	loff_t new_size;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	int retries = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	/*
	 * currently supporting (pre)allocate mode for extent-based
	 * files _only_
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EOPNOTSUPP;

	trace_ext4_fallocate_enter(inode, offset, len, mode);
	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because a partial block
	 * at either end may add a block: e.g. blocksize = 4096,
	 * offset = 3072 and len = 2048 span two blocks even though
	 * len >> blkbits == 0.
	 */
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
		- map.m_lblk;
	/*
	 * credits to insert 1 extent into the extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, (len + offset));
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
		return ret;
	}
retry:
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk = map.m_lblk + ret;
		map.m_len = max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
		if (ret <= 0) {
#ifdef EXT4FS_DEBUG
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_map_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u", __func__,
				    inode->i_ino, map.m_lblk, max_blocks);
#endif
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
		if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
						blkbits) >> blkbits))
			new_size = offset + len;
		else
			/* cast avoids 32-bit overflow for large offsets */
			new_size = ((loff_t)map.m_lblk + ret) << blkbits;

		ext4_falloc_update_inode(inode, mode, new_size,
					 (map.m_flags & EXT4_MAP_NEW));
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret2)
			break;
	}
	if (ret == -ENOSPC &&
	    ext4_should_retry_alloc(inode->i_sb, &retries)) {
		ret = 0;
		goto retry;
	}
	mutex_unlock(&inode->i_mutex);
	trace_ext4_fallocate_exit(inode, offset, max_blocks,
				  ret > 0 ? ret2 : ret);
	return ret > 0 ? ret2 : ret;
}

/*
 * This function converts a range of blocks to written extents.
 * The caller will pass the start offset and the size; all unwritten
 * extents within this range will be converted to written extents.
 *
 * This function is called from the direct IO end io callback
 * function, to convert the fallocated extents after the IO is completed.
 * Returns 0 on success.
 */
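/*
 * Note on the offset-to-block arithmetic below: with blocksize = 4096,
 * offset = 3072 and len = 2048, the byte range [3072, 5120) touches
 * logical blocks 0 and 1, so map.m_lblk = 3072 >> 12 = 0 and
 * max_blocks = (EXT4_BLOCK_ALIGN(5120, 12) >> 12) - 0 = 2, whereas the
 * naive len >> blkbits would yield 0.
 */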
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
				   ssize_t len)
{
	handle_t *handle;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because a partial block
	 * at either end may add a block (see the example above).
	 */
	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
		      map.m_lblk);
	/*
	 * credits to insert 1 extent into the extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk += ret;
		map.m_len = (max_blocks -= ret);
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
		if (ret <= 0) {
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_map_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u", __func__,
				    inode->i_ino, map.m_lblk, map.m_len);
		}
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2)
			break;
	}
	return ret > 0 ? ret2 : ret;
}

/*
 * Callback function called for each extent to gather FIEMAP information.
 */
static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
		       struct ext4_ext_cache *newex, struct ext4_extent *ex,
		       void *data)
{
	__u64	logical;
	__u64	physical;
	__u64	length;
	loff_t	size;
	__u32	flags = 0;
	int	ret = 0;
	struct fiemap_extent_info *fieinfo = data;
	unsigned char blksize_bits;

	blksize_bits = inode->i_sb->s_blocksize_bits;
	logical = (__u64)newex->ec_block << blksize_bits;

	if (newex->ec_start == 0) {
		/*
		 * No extent in the extent tree contains this block, so it
		 * lies either in 1) a hole or 2) a delayed extent.
		 *
		 * Holes or delayed extents are processed as follows.
		 * 1. lookup dirty pages with the specified range in the
		 *    pagecache.  If no page is found, there is no delayed
		 *    extent; return with EXT_CONTINUE.
		 * 2. find the 1st mapped buffer,
		 * 3. check if the mapped buffer is both in the request
		 *    range and a delayed buffer.  If not, there is no
		 *    delayed extent, so return.
		 * 4. a delayed extent is found; the extent will be
		 *    collected.
		 */
		ext4_lblk_t	end = 0;
		pgoff_t		last_offset;
		pgoff_t		offset;
		pgoff_t		index;
		pgoff_t		start_index = 0;
		struct page	**pages = NULL;
		struct buffer_head *bh = NULL;
		struct buffer_head *head = NULL;
		unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
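		/*
		 * One page's worth of page pointers is used to batch the
		 * pagecache lookup; e.g. with 4 KB pages and 8-byte
		 * pointers that is 512 entries per find_get_pages_tag()
		 * call.
		 */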
		pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (pages == NULL)
			return -ENOMEM;

		offset = logical >> PAGE_SHIFT;
repeat:
		last_offset = offset;
		head = NULL;
		ret = find_get_pages_tag(inode->i_mapping, &offset,
					PAGECACHE_TAG_DIRTY, nr_pages, pages);

		if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
			/* First time, try to find a mapped buffer. */
			if (ret == 0) {
out:
				for (index = 0; index < ret; index++)
					page_cache_release(pages[index]);
				/* just a hole. */
				kfree(pages);
				return EXT_CONTINUE;
			}

			index = 0;
next_page:
			/* Try to find the 1st mapped buffer. */
			end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
				  blksize_bits;
			if (!page_has_buffers(pages[index]))
				goto out;
			head = page_buffers(pages[index]);
			if (!head)
				goto out;

			index++;
			bh = head;
			do {
				if (end >= newex->ec_block +
					newex->ec_len)
					/* The buffer is out of
					 * the request range.
					 */
					goto out;

				if (buffer_mapped(bh) &&
				    end >= newex->ec_block) {
					start_index = index - 1;
					/* get the 1st mapped buffer. */
					goto found_mapped_buffer;
				}

				bh = bh->b_this_page;
				end++;
			} while (bh != head);

			/* No mapped buffer in the range was found in this
			 * page; we need to look up the next page.
			 */
			if (index >= ret) {
				/* There is no page left, but we need to limit
				 * newex->ec_len.
				 */
				newex->ec_len = end - newex->ec_block;
				goto out;
			}
			goto next_page;
		} else {
			/* Find contiguous delayed buffers. */
			if (ret > 0 && pages[0]->index == last_offset)
				head = page_buffers(pages[0]);
			bh = head;
			index = 1;
			start_index = 0;
		}

found_mapped_buffer:
		if (bh != NULL && buffer_delay(bh)) {
			/* 1st or contiguous delayed buffer found. */
			if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
				/*
				 * 1st delayed buffer found, record
				 * the start of the extent.
				 */
				flags |= FIEMAP_EXTENT_DELALLOC;
				newex->ec_block = end;
				logical = (__u64)end << blksize_bits;
			}
			/* Find contiguous delayed buffers. */
			do {
				if (!buffer_delay(bh))
					goto found_delayed_extent;
				bh = bh->b_this_page;
				end++;
			} while (bh != head);

			for (; index < ret; index++) {
				if (!page_has_buffers(pages[index])) {
					bh = NULL;
					break;
				}
				head = page_buffers(pages[index]);
				if (!head) {
					bh = NULL;
					break;
				}

				if (pages[index]->index !=
				    pages[start_index]->index + index
				    - start_index) {
					/* Blocks are not contiguous. */
					bh = NULL;
					break;
				}
				bh = head;
				do {
					if (!buffer_delay(bh))
						/* Delayed-extent ends. */
						goto found_delayed_extent;
					bh = bh->b_this_page;
					end++;
				} while (bh != head);
			}
		} else if (!(flags & FIEMAP_EXTENT_DELALLOC))
			/* a hole found. */
			goto out;

found_delayed_extent:
		newex->ec_len = min(end - newex->ec_block,
						(ext4_lblk_t)EXT_INIT_MAX_LEN);
		if (ret == nr_pages && bh != NULL &&
			newex->ec_len < EXT_INIT_MAX_LEN &&
			buffer_delay(bh)) {
			/* The extent has not been fully collected yet;
			 * continue scanning from the next batch of pages.
			 */
			for (index = 0; index < ret; index++)
				page_cache_release(pages[index]);
			goto repeat;
		}

		for (index = 0; index < ret; index++)
			page_cache_release(pages[index]);
		kfree(pages);
	}

	physical = (__u64)newex->ec_start << blksize_bits;
	length = (__u64)newex->ec_len << blksize_bits;

	if (ex && ext4_ext_is_uninitialized(ex))
		flags |= FIEMAP_EXTENT_UNWRITTEN;

	size = i_size_read(inode);
	if (logical + length >= size)
		flags |= FIEMAP_EXTENT_LAST;

	ret = fiemap_fill_next_extent(fieinfo, logical, physical,
					length, flags);
	if (ret < 0)
		return ret;
	if (ret == 1)
		return EXT_BREAK;
	return EXT_CONTINUE;
}

/* fiemap flags we can handle are specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

static int ext4_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		brelse(iloc.bh);
	} else { /* external block */
		physical = EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}

int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	int error = 0;

	/* fallback to generic here if not in extents fmt */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return generic_block_fiemap(inode, fieinfo, start, len,
			ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		ext4_lblk_t len_blks;
		__u64 last_blk;

		start_blk = start >> inode->i_sb->s_blocksize_bits;
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		if (last_blk >= EXT_MAX_BLOCK)
			last_blk = EXT_MAX_BLOCK-1;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

		/*
		 * Walk the extent tree gathering extent information.
		 * ext4_ext_fiemap_cb will push extents back to user.
		 */
		error = ext4_ext_walk_space(inode, start_blk, len_blks,
					  ext4_ext_fiemap_cb, fieinfo);
	}

	return error;
}
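
/*
 * For reference, a minimal userspace sketch (illustrative only) that
 * reaches this entry point through the FIEMAP ioctl:
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   sizeof(struct fiemap_extent));
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_extent_count = 1;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * On return, fm->fm_mapped_extents says how many extents were filled in
 * and fm->fm_extents[0] describes the first extent of the file.
 */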