/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"

/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16   /* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64   /* Upper limit of the number of segments
				appended in collection retry loop */

/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/* Flush data blocks of a given file and make
			   a logical segment without a super root */
	SC_FLUSH_FILE,	/* Flush data files, leads to segment writes without
			   creating a checkpoint */
	SC_FLUSH_DAT,	/* Flush DAT file.  This also creates segments
			   without a checkpoint */
};

/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,		/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,		/* Super root */
	NILFS_ST_DSYNC,		/* Data sync blocks */
	NILFS_ST_DONE,
};

/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct nilfs_sb_info *, struct list_head *,
			       int);

#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&		\
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&		\
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)  nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)  nilfs_cnt32_ge(b, a)
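
/*
 * Example: the comparisons above remain correct across 32-bit wraparound.
 * With a == 0x00000001 and b == 0xfffffffe, a is three ticks "after" b,
 * and (__s32)(b) - (__s32)(a) == -3 < 0, so nilfs_cnt32_gt(a, b) is true
 * even though a < b as plain unsigned values.
 */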

static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;
		else {
			/*
			 * If journal_info field is occupied by other FS,
			 * it is saved and will be restored on
			 * nilfs_transaction_commit().
			 */
			printk(KERN_WARNING
			       "NILFS warning: journal info from a different "
			       "FS\n");
			save = current->journal_info;
		}
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}
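
/*
 * Note: a positive return value above means the caller is already inside
 * a NILFS transaction; the nesting depth is kept in ti_count and no new
 * context is installed.  Zero means a fresh nilfs_transaction_info was
 * hooked onto current->journal_info.
 */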

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make segment construction and write tasks
 * exclusive.  The function is used in pairs with nilfs_transaction_commit().
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When the @vacancy_check flag is set, this function will check the amount
 * of free space, and will wait for the GC to reclaim disk space if capacity
 * is low.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct nilfs_sb_info *sbi;
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(ti);

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0)
		return 0;

	vfs_check_frozen(sb, SB_FREEZE_WRITE);

	sbi = NILFS_SB(sb);
	nilfs = sbi->s_nilfs;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	return ret;
}

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin().  This is only performed
 * in the outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct nilfs_sb_info *sbi;
	struct nilfs_sc_info *sci;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return 0;
	}
	sbi = NILFS_SB(sb);
	sci = NILFS_SC(sbi);
	if (sci != NULL) {
		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&sbi->s_nilfs->ns_ndirtyblks) >
		    sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&sbi->s_nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	return err;
}

void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return;
	}
	up_read(&NILFS_SB(sb)->s_nilfs->ns_segctor_sem);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
}
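
/*
 * A typical caller looks roughly like this (illustrative sketch, from the
 * point of view of a file operation elsewhere in the filesystem):
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (err)
 *		return err;
 *	... mark inodes and buffers dirty ...
 *	if (failure)
 *		nilfs_transaction_abort(sb);
 *	else
 *		err = nilfs_transaction_commit(sb);
 *
 * Begin/commit pairs may nest; ti_count records the depth, and only the
 * outermost commit releases the segment semaphore and arms the timer.
 */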

void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct the_nilfs *nilfs = sbi->s_nilfs;

	if (!sci || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct nilfs_sb_info *sbi,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	INIT_LIST_HEAD(&ti->ti_garbage);
	current->journal_info = ti;

	for (;;) {
		down_write(&sbi->s_nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &NILFS_SC(sbi)->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(NILFS_SC(sbi));

		up_write(&sbi->s_nilfs->ns_segctor_sem);
		yield();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;
}

static void nilfs_transaction_unlock(struct nilfs_sb_info *sbi)
{
	struct nilfs_transaction_info *ti = current->journal_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&sbi->s_nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;
	if (!list_empty(&ti->ti_garbage))
		nilfs_dispose_list(sbi, &ti->ti_garbage, 0);
}

static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned sumbytes;
	unsigned flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /* The current segment is filled up
				  (internal code) */
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned binfo_size)
{
	unsigned blocksize = sci->sc_super->s_blocksize;
	/* Sizes of finfo and binfo are small enough compared with blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}
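
/*
 * Worked example: with a 4096-byte block, ssp->offset == 4088, and
 * binfo_size == sizeof(struct nilfs_binfo_v) == 16, the sum 4088 + 16 =
 * 4104 exceeds 4096, so one more segment summary block is required before
 * the entry can be mapped.  When sc_blk_cnt is zero, the size of the
 * nilfs_finfo that would start a new file entry is added to the check.
 */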

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
	/* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}

static int nilfs_handle_bmap_error(int err, const char *fname,
				   struct inode *inode, struct super_block *sb)
{
	if (err == -EINVAL) {
		nilfs_error(sb, fname, "broken bmap (inode=%lu)\n",
			    inode->i_ino);
		err = -EIO;
	}
	return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);
	return 0;
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}

static struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (unlikely(err < 0))
		return nilfs_handle_bmap_error(err, __func__, inode,
					       sci->sc_super);

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}

static struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};
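
/*
 * The three operation tables above select the collection behaviour per
 * construction target: ordinary files and non-DAT metadata use
 * nilfs_sc_file_ops, whose binfo entries carry virtual block numbers
 * resolved through the DAT; the DAT itself uses nilfs_sc_dat_ops, which
 * records real block information instead; and data-sync logs use
 * nilfs_sc_dsync_ops, which collects data blocks only, so its node and
 * bmap hooks stay NULL.
 */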

static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages.  The
		 * range is rounded to page boundaries; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec, 0);
 repeat:
	if (unlikely(index > last) ||
	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				min_t(pgoff_t, last - index,
				      PAGEVEC_SIZE - 1) + 1))
		return ndirties;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		if (unlikely(page->index > last))
			break;

		if (mapping->host) {
			lock_page(page);
			if (!page_has_buffers(page))
				create_empty_buffers(page,
						     1 << inode->i_blkbits, 0);
			unlock_page(page);
		}

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;
}
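
/*
 * Note: callers such as nilfs_segctor_scan_file() pass nlimit = rest + 1,
 * so a return value greater than the remaining block budget signals
 * "segment full" without walking every dirty page; e.g. with rest == 100,
 * the scan stops as soon as the 101st dirty buffer is queued.
 */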

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct address_space *mapping = &ii->i_btnode_cache;
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

static void nilfs_dispose_list(struct nilfs_sb_info *sbi,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned nv = 0;

	while (!list_empty(head)) {
		spin_lock(&sbi->s_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &sbi->s_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&sbi->s_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if (ret || nilfs_doing_gc())
		if (nilfs_mdt_fetch_dirty(nilfs_dat_inode(nilfs)))
			ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	int ret = 0;

	if (nilfs_test_metadata_dirty(sbi->s_nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&sbi->s_inode_lock);
	if (list_empty(&sbi->s_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&sbi->s_inode_lock);
	return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs_dat_inode(nilfs));
}

static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/*
		 * The following code duplicates work done in cpfile, but it
		 * is needed to collect the checkpoint even if it was not
		 * newly created.
		 */
		nilfs_mdt_mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else
		WARN_ON(err == -EINVAL || err == -ENOENT);

	return err;
}

static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		WARN_ON(err == -EINVAL || err == -ENOENT);
		goto failed_ibh;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic_read(&sci->sc_root->inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic_read(&sci->sc_root->blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sci->sc_root->ifile,
				 &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;

 failed_ibh:
	return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)
{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned isz = nilfs->ns_inode_size;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;

	raw_sr->sr_bytes = cpu_to_le16(NILFS_SR_BYTES);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs_dat_inode(nilfs), (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_entry(listp->next, struct buffer_head,
				b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		BUG_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}

static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (sci->sc_stage.scnt) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				sci->sc_stage.scnt = NILFS_ST_DSYNC;
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DAT;
			goto dat_stage;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		/* Fall through */
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		/* Fall through */
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs_dat_inode(nilfs),
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}
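
/*
 * The switch above is a resumable state machine: sc_stage.scnt names the
 * stage to (re)enter, and the dirty_file_ptr/gc_inode_ptr cursors remember
 * which inode a partially completed FILE or GC stage stopped at, so a pass
 * interrupted by a full segment or an error can pick up where it left off.
 */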

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}
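
/*
 * Placement summary: when the write queue is empty, the new log starts at
 * the current partial-segment offset (ns_pseg_offset); otherwise it
 * continues right after the last queued log.  In either case, if fewer
 * than NILFS_PSEG_MIN_BLOCKS remain, construction moves to the head of a
 * new full segment and a fresh next-segment number is reserved through
 * nilfs_sufile_alloc().
 */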

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty.  The following call ensures that the buffer is dirty
	 * and will pin the buffer in memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}

static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/* Case 1a:  Partial segment appended into an existing
			   segment */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b:  New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}

static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeeds because the segusage is dirty */
	}
}

static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeeds because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeeds */
	}
}

static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}

static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* does not happen */
		}
		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}
static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}

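/*
 * nilfs_segctor_update_payload_blocknr - assign disk block numbers to
 * payload blocks
 * @sci: segment constructor object
 * @segbuf: segment buffer holding the payload
 * @mode: mode of log forming
 *
 * Walks the payload buffers in step with the finfo entries of the
 * segment summary, lets the bmap assign the new on-disk location of
 * each block, and writes the matching binfo through the per-file-type
 * operation vector (dsync, DAT, or regular file operations).
 */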
static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo = nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			if (buffer_nilfs_node(bh))
				inode = NILFS_BTNC_I(bh->b_page->mapping);
			else
				inode = NILFS_AS_I(bh->b_page->mapping);

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	err = nilfs_handle_bmap_error(err, __func__, inode, sci->sc_super);
	return err;
}

static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}

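/*
 * nilfs_copy_replace_page_buffers - freeze page content into a private copy
 * @page: page to be frozen
 * @out: list head to which the original buffers are moved
 *
 * Clones the buffers of @page that are under collection into a freshly
 * allocated private page, apparently so that the written image stays
 * stable even if the original page (e.g. an mmapped one) is modified
 * during write-out.  The original buffer heads are parked on @out until
 * the I/O completes.
 */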
static int
nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out)
{
	struct page *clone_page;
	struct buffer_head *bh, *head, *bh2;
	void *kaddr;

	bh = head = page_buffers(page);

	clone_page = nilfs_alloc_private_page(bh->b_bdev, bh->b_size, 0);
	if (unlikely(!clone_page))
		return -ENOMEM;

	bh2 = page_buffers(clone_page);
	kaddr = kmap_atomic(page, KM_USER0);
	do {
		if (list_empty(&bh->b_assoc_buffers))
			continue;
		get_bh(bh2);
		page_cache_get(clone_page); /* for each bh */
		memcpy(bh2->b_data, kaddr + bh_offset(bh), bh2->b_size);
		bh2->b_blocknr = bh->b_blocknr;
		list_replace(&bh->b_assoc_buffers, &bh2->b_assoc_buffers);
		list_add_tail(&bh->b_assoc_buffers, out);
	} while (bh = bh->b_this_page, bh2 = bh2->b_this_page, bh != head);
	kunmap_atomic(kaddr, KM_USER0);

	if (!TestSetPageWriteback(clone_page))
		account_page_writeback(clone_page);
	unlock_page(clone_page);

	return 0;
}

static int nilfs_test_page_to_be_frozen(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (!mapping || !mapping->host || S_ISDIR(mapping->host->i_mode))
		return 0;

	if (page_mapped(page)) {
		ClearPageChecked(page);
		return 1;
	}
	return PageChecked(page);
}

static int nilfs_begin_page_io(struct page *page, struct list_head *out)
{
	if (!page || PageWriteback(page))
		/* For split b-tree node pages, this function may be called
		   twice.  We ignore the 2nd or later calls by this check. */
		return 0;

	lock_page(page);
	clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);

	if (nilfs_test_page_to_be_frozen(page)) {
		int err = nilfs_copy_replace_page_buffers(page, out);
		if (unlikely(err))
			return err;
	}
	return 0;
}

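/*
 * nilfs_segctor_prepare_write - put pages under writeback before submission
 * @sci: segment constructor object
 * @failed_page: place to store the page that failed to be prepared
 *
 * Sets the writeback state on every block-device page (segment summaries
 * and super root) and every file page of the collected buffers, freezing
 * the file pages that need a private copy.  On error, *@failed_page tells
 * the caller how far the preparation got so it can be unwound.
 */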
static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
				       struct page **failed_page)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct list_head *list = &sci->sc_copied_buffers;
	int err;

	*failed_page = NULL;
	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
				}
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				err = nilfs_begin_page_io(fs_page, list);
				if (unlikely(err)) {
					*failed_page = fs_page;
					goto out;
				}
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page) {
		lock_page(bd_page);
		clear_page_dirty_for_io(bd_page);
		set_page_writeback(bd_page);
		unlock_page(bd_page);
	}

	err = nilfs_begin_page_io(fs_page, list);
	if (unlikely(err))
		*failed_page = fs_page;
 out:
	return err;
}

static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}

static void __nilfs_end_page_io(struct page *page, int err)
{
	if (!err) {
		if (!nilfs_page_buffers_clean(page))
			__set_page_dirty_nobuffers(page);
		ClearPageError(page);
	} else {
		__set_page_dirty_nobuffers(page);
		SetPageError(page);
	}

	if (buffer_nilfs_allocated(page_buffers(page))) {
		if (TestClearPageWriteback(page))
			dec_zone_page_state(page, NR_WRITEBACK);
	} else
		end_page_writeback(page);
}

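/*
 * nilfs_end_page_io - finish the writeback state of a page
 * @page: page to be finished (may be NULL)
 * @err: error code of the write
 *
 * B-tree node pages get special handling because their buffers may be
 * scattered over multiple logs; see the comments below for the details.
 */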
static void nilfs_end_page_io(struct page *page, int err)
{
	if (!page)
		return;

	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (PageDirty(page)) {
			/*
			 * For pages holding split b-tree node buffers, dirty
			 * flag on the buffers may be cleared discretely.
			 * In that case, the page is once redirtied for
			 * remaining buffers, and it must be cancelled if
			 * all the buffers get cleaned later.
			 */
			lock_page(page);
			if (nilfs_page_buffers_clean(page))
				__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		return;
	}

	__nilfs_end_page_io(page, err);
}

static void nilfs_clear_copied_buffers(struct list_head *list, int err)
{
	struct buffer_head *bh, *head;
	struct page *page;

	while (!list_empty(list)) {
		bh = list_entry(list->next, struct buffer_head,
				b_assoc_buffers);
		page = bh->b_page;
		page_cache_get(page);
		head = bh = page_buffers(page);
		do {
			if (!list_empty(&bh->b_assoc_buffers)) {
				list_del_init(&bh->b_assoc_buffers);
				if (!err) {
					set_buffer_uptodate(bh);
					clear_buffer_dirty(bh);
					clear_buffer_nilfs_volatile(bh);
				}
				brelse(bh); /* for b_assoc_buffers */
			}
		} while ((bh = bh->b_this_page) != head);

		__nilfs_end_page_io(page, err);
		page_cache_release(page);
	}
}

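/*
 * nilfs_abort_logs - end writeback of pages belonging to failed logs
 * @logs: list of aborted logs
 * @failed_page: page on which the write failed; the walk stops after it
 * @err: error code to propagate to the page cleanup
 */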
static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
			     int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, err);
				if (fs_page && fs_page == failed_page)
					return;
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, err);
}

static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, NULL, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);
	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* should never happen */
	}

	nilfs_destroy_logs(&logs);
}

static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}

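/*
 * nilfs_segctor_complete_write - finish a successful log write
 * @sci: segment constructor object
 *
 * Marks all written buffers clean and up to date, ends the writeback
 * state of their pages, releases frozen copies and collected inodes, and
 * advances the recovery information of the nilfs object.  When a super
 * root was written, the checkpoint number is incremented and the dirty
 * state of the metadata files is cleared.
 */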
static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}
		/*
		 * We assume that the buffers which belong to the same page
		 * continue over the buffer list.
		 * Under this assumption, the last BHs of pages are
		 * identifiable by the discontinuity of bh->b_page
		 * (page != fs_page).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed.  The cleanup code of B-tree node pages needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			clear_buffer_nilfs_volatile(bh);
			clear_buffer_nilfs_redirected(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				update_sr = true;
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, 0);
				fs_page = bh->b_page;
			}
		}

		if (!nilfs_segbuf_simplex(segbuf)) {
			if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since pages may continue over multiple segment buffers,
	 * end of the last page must be checked outside of the loop.
	 */
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, 0);

	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
	else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}

static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
	if (!ret) {
		nilfs_segctor_complete_write(sci);
		nilfs_destroy_logs(&sci->sc_write_logs);
	}
	return ret;
}

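/*
 * nilfs_segctor_check_in_files - attach dirty inodes to the constructor
 * @sci: segment constructor object
 * @sbi: nilfs_sb_info
 *
 * Moves inodes from the per-superblock dirty list to sc_dirty_files,
 * reading in (and dirtying) the ifile inode block of each entry that
 * does not have one yet.  The list walk restarts from the beginning
 * whenever the lock had to be dropped for a block read.
 */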
static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
					struct nilfs_sb_info *sbi)
{
	struct nilfs_inode_info *ii, *n;
	struct inode *ifile = sci->sc_root->ifile;

	spin_lock(&sbi->s_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &sbi->s_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&sbi->s_inode_lock);
			err = nilfs_ifile_get_inode_block(
				ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_warning(sbi->s_super, __func__,
					      "failed to get inode block.\n");
				return err;
			}
			nilfs_mdt_mark_buffer_dirty(ibh);
			nilfs_mdt_mark_dirty(ifile);
			spin_lock(&sbi->s_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&sbi->s_inode_lock);

	return 0;
}

static void nilfs_segctor_check_out_files(struct nilfs_sc_info *sci,
					  struct nilfs_sb_info *sbi)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct nilfs_inode_info *ii, *n;

	spin_lock(&sbi->s_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state))
			continue;

		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &ti->ti_garbage);
	}
	spin_unlock(&sbi->s_inode_lock);
}

/*
 * Main procedure of segment constructor
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct page *failed_page;
	int err;

	sci->sc_stage.scnt = NILFS_ST_INIT;
	sci->sc_cno = nilfs->ns_cno;

	err = nilfs_segctor_check_in_files(sci, sbi);
	if (unlikely(err))
		goto out;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))
		goto out;

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);
		if (unlikely(err))
			goto out;

		/* Update time stamp */
		sci->sc_seg_ctime = get_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);
		if (unlikely(err))
			goto failed;

		/* Avoid empty segment */
		if (sci->sc_stage.scnt == NILFS_ST_DONE &&
		    nilfs_segbuf_empty(sci->sc_curseg)) {
			nilfs_segctor_abort_construction(sci, nilfs, 1);
			goto out;
		}

		err = nilfs_segctor_assign(sci, mode);
		if (unlikely(err))
			goto failed;

		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci);

		if (mode == SC_LSEG_SR &&
		    sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
			err = nilfs_segctor_fill_in_checkpoint(sci);
			if (unlikely(err))
				goto failed_to_write;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		err = nilfs_segctor_prepare_write(sci, &failed_page);
		if (err) {
			nilfs_abort_logs(&sci->sc_segbufs, failed_page, err);
			goto failed_to_write;
		}

		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
					    nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs);
		if (unlikely(err))
			goto failed_to_write;

		if (sci->sc_stage.scnt == NILFS_ST_DONE ||
		    nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
			/*
			 * At this point, we avoid double buffering
			 * for blocksize < pagesize because page dirty
			 * flag is turned off during write and dirty
			 * buffers are not properly collected for
			 * pages crossing over segments.
			 */
			err = nilfs_segctor_wait(sci);
			if (err)
				goto failed_to_write;
		}
	} while (sci->sc_stage.scnt != NILFS_ST_DONE);

 out:
	nilfs_segctor_check_out_files(sci, sbi);
	return err;

 failed_to_write:
	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
		nilfs_redirty_inodes(&sci->sc_dirty_files);

 failed:
	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_abort_construction(sci, nilfs, err);
	goto out;
}

/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, it ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		sci->sc_timer.expires = jiffies + sci->sc_interval;
		add_timer(&sci->sc_timer);
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}

static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & (1 << bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= (1 << bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);

	if (!sci || nilfs_doing_construction())
		return;
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
					/* assign bit 0 to data files */
}

struct nilfs_segctor_wait_request {
	wait_queue_t	wq;
	__u32		seq;
	int		err;
	atomic_t	done;
};

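/*
 * nilfs_segctor_sync - wait for the completion of a segment construction
 * @sci: segment constructor object
 *
 * Queues a wait request tagged with the next request sequence number,
 * kicks the daemon, and sleeps until the request is marked done or a
 * signal is received (-ERESTARTSYS).
 */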
static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;
	int err = 0;

	spin_lock(&sci->sc_state_lock);
	init_wait(&wait_req.wq);
	wait_req.err = 0;
	atomic_set(&wait_req.done, 0);
	wait_req.seq = ++sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);

	init_waitqueue_entry(&wait_req.wq, current);
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	set_current_state(TASK_INTERRUPTIBLE);
	wake_up(&sci->sc_wait_daemon);

	for (;;) {
		if (atomic_read(&wait_req.done)) {
			err = wait_req.err;
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
	return err;
}

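/*
 * nilfs_segctor_wakeup - wake up tasks waiting for completed constructions
 * @sci: segment constructor object
 * @err: result to hand to the woken waiters
 *
 * Completes every queued wait request whose sequence number has been
 * reached by sc_seq_done and invokes its wait-queue callback.
 */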
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
				 wq.task_list) {
		if (!atomic_read(&wrq->done) &&
		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}

/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct nilfs_transaction_info *ti;
	int err;

	if (!sci)
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	err = nilfs_segctor_sync(sci);
	return err;
}

/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	if (!sci)
		return -EROFS;

	nilfs_transaction_lock(sbi, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_DIRTY, &ii->i_state) ||
	    nilfs_test_opt(sbi, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(sbi->s_nilfs)) {
		nilfs_transaction_unlock(sbi);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&sbi->s_inode_lock);
		nilfs_transaction_unlock(sbi);
		return 0;
	}
	spin_unlock(&sbi->s_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);

	nilfs_transaction_unlock(sbi);
	return err;
}

#define FLUSH_FILE_BIT	(0x1) /* data file only */
#define FLUSH_DAT_BIT	(1 << NILFS_DAT_INO) /* DAT only */

/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);
	del_timer_sync(&sci->sc_timer);
}

/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct nilfs_super_block **sbp;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))
		mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (likely(!err)) {
		if (mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			err = -EIO;
			sbp = nilfs_prepare_super(sbi,
						  nilfs_sb_will_flip(nilfs));
			if (likely(sbp)) {
				nilfs_set_log_cursor(sbp[0], nilfs);
				err = nilfs_commit_super(sbi, NILFS_SB_COMMIT);
			}
			up_write(&nilfs->ns_sem);
		}
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}

static void nilfs_construction_timeout(unsigned long data)
{
	struct task_struct *p = (struct task_struct *)data;

	wake_up_process(p);
}

static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		list_del_init(&ii->i_dirty);
		iput(&ii->vfs_inode);
	}
}

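/*
 * nilfs_clean_segments - perform the log write part of garbage collection
 * @sb: super block
 * @argv: argument vectors of the cleaner ioctl
 * @kbufs: payload buffers of @argv
 *
 * Called from the cleaner ioctl.  Saves the DAT state to a shadow map so
 * a failed attempt can be rolled back, registers the segments to be
 * freed, and retries the construction at sc_interval until it succeeds.
 */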
int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
			 void **kbufs)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct nilfs_transaction_info ti;
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sbi, &ti, 1);

	err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
	if (unlikely(err))
		goto out_unlock;

	err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
	if (unlikely(err)) {
		nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
		goto out_unlock;
	}

	sci->sc_freesegs = kbufs[4];
	sci->sc_nfreesegs = argv[4].v_nmembs;
	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

	for (;;) {
		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

		if (likely(!err))
			break;

		nilfs_warning(sb, __func__,
			      "segment construction failed. (err=%d)", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}
	if (nilfs_test_opt(sbi, DISCARD)) {
		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
						 sci->sc_nfreesegs);
		if (ret) {
			printk(KERN_WARNING
			       "NILFS warning: error %d on discard request, "
			       "turning discards off for the device\n", ret);
			nilfs_clear_opt(sbi, DISCARD);
		}
	}

 out_unlock:
	sci->sc_freesegs = NULL;
	sci->sc_nfreesegs = 0;
	nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
	nilfs_transaction_unlock(sbi);
	return err;
}

static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct nilfs_transaction_info ti;

	nilfs_transaction_lock(sbi, &ti, 0);
	nilfs_segctor_construct(sci, mode);

	/*
	 * Unclosed segment should be retried.  We do this using sc_timer.
	 * Timeout of sc_timer will invoke complete construction which leads
	 * to close the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sbi);
}

static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode = 0;
	int err;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		err = nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}

static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}

/**
 * nilfs_segctor_thread - main loop of the segment constructor thread.
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() initializes a timer and serves as a daemon
 * to execute segment constructions.
 */
static int nilfs_segctor_thread(void *arg)
{
	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	int timeout = 0;

	sci->sc_timer.data = (unsigned long)current;
	sci->sc_timer.function = nilfs_construction_timeout;

	/* start sync. */
	sci->sc_task = current;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
	printk(KERN_INFO
	       "segctord starting. Construction interval = %lu seconds, "
	       "CP frequency < %lu seconds\n",
	       sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

	spin_lock(&sci->sc_state_lock);
 loop:
	for (;;) {
		int mode;

		if (sci->sc_state & NILFS_SEGCTOR_QUIT)
			goto end_thread;

		if (timeout || sci->sc_seq_request != sci->sc_seq_done)
			mode = SC_LSEG_SR;
		else if (!sci->sc_flush_request)
			break;
		else
			mode = nilfs_segctor_flush_mode(sci);

		spin_unlock(&sci->sc_state_lock);
		nilfs_segctor_thread_construct(sci, mode);
		spin_lock(&sci->sc_state_lock);
		timeout = 0;
	}

	if (freezing(current)) {
		spin_unlock(&sci->sc_state_lock);
		refrigerator();
		spin_lock(&sci->sc_state_lock);
	} else {
		DEFINE_WAIT(wait);
		int should_sleep = 1;

		prepare_to_wait(&sci->sc_wait_daemon, &wait,
				TASK_INTERRUPTIBLE);

		if (sci->sc_seq_request != sci->sc_seq_done)
			should_sleep = 0;
		else if (sci->sc_flush_request)
			should_sleep = 0;
		else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
			should_sleep = time_before(jiffies,
						   sci->sc_timer.expires);

		if (should_sleep) {
			spin_unlock(&sci->sc_state_lock);
			schedule();
			spin_lock(&sci->sc_state_lock);
		}
		finish_wait(&sci->sc_wait_daemon, &wait);
		timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
			   time_after_eq(jiffies, sci->sc_timer.expires));

		if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
			set_nilfs_discontinued(nilfs);
	}
	goto loop;

 end_thread:
	spin_unlock(&sci->sc_state_lock);

	/* end sync. */
	sci->sc_task = NULL;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
	return 0;
}

static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
{
	struct task_struct *t;

	t = kthread_run(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		int err = PTR_ERR(t);

		printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
		       err);
		return err;
	}
	wait_event(sci->sc_wait_task, sci->sc_task != NULL);
	return 0;
}

static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
	__acquires(&sci->sc_state_lock)
	__releases(&sci->sc_state_lock)
{
	sci->sc_state |= NILFS_SEGCTOR_QUIT;

	while (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		spin_unlock(&sci->sc_state_lock);
		wait_event(sci->sc_wait_task, sci->sc_task == NULL);
		spin_lock(&sci->sc_state_lock);
	}
}

/*
 * Setup & clean-up functions
 */

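/*
 * nilfs_segctor_new - allocate and initialize a segment constructor object
 * @sbi: nilfs_sb_info
 * @root: root object of the current filesystem tree
 *
 * Takes a reference on @root and sets the default construction interval,
 * super root creation frequency, and watermark, which the mount-time
 * values stored in @sbi may override.  Returns NULL on allocation
 * failure.
 */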
static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi,
					       struct nilfs_root *root)
{
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_sbi = sbi;
	sci->sc_super = sbi->s_super;

	nilfs_get_root(root);
	sci->sc_root = root;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	init_waitqueue_head(&sci->sc_wait_task);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_write_logs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_copied_buffers);
	init_timer(&sci->sc_timer);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (sbi->s_interval)
		sci->sc_interval = sbi->s_interval;
	if (sbi->s_watermark)
		sci->sc_watermark = sbi->s_watermark;
	return sci;
}

static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/* The segctord thread was stopped and its timer was removed.
	   But some tasks remain. */
	do {
		struct nilfs_sb_info *sbi = sci->sc_sbi;
		struct nilfs_transaction_info ti;

		nilfs_transaction_lock(sbi, &ti, 0);
		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_transaction_unlock(sbi);

	} while (ret && retrycount-- > 0);
}

/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	int flag;

	up_write(&sbi->s_nilfs->ns_segctor_sem);

	spin_lock(&sci->sc_state_lock);
	nilfs_segctor_kill_thread(sci);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	if (flag || !nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	WARN_ON(!list_empty(&sci->sc_copied_buffers));

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_warning(sbi->s_super, __func__,
			      "dirty file(s) after the final construction\n");
		nilfs_dispose_list(sbi, &sci->sc_dirty_files, 1);
	}

	WARN_ON(!list_empty(&sci->sc_segbufs));
	WARN_ON(!list_empty(&sci->sc_write_logs));

	nilfs_put_root(sci->sc_root);

	down_write(&sbi->s_nilfs->ns_segctor_sem);

	del_timer_sync(&sci->sc_timer);
	kfree(sci);
}

/**
 * nilfs_attach_segment_constructor - attach a segment constructor
 * @sbi: nilfs_sb_info
 * @root: root object of the current filesystem tree
 *
 * nilfs_attach_segment_constructor() allocates a struct nilfs_sc_info,
 * initializes it, and starts the segment constructor.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi,
				     struct nilfs_root *root)
{
	int err;

	if (NILFS_SC(sbi)) {
		/*
		 * This happens if the filesystem was remounted
		 * read/write after nilfs_error degenerated it into a
		 * read-only mount.
		 */
		nilfs_detach_segment_constructor(sbi);
	}

	sbi->s_sc_info = nilfs_segctor_new(sbi, root);
	if (!sbi->s_sc_info)
		return -ENOMEM;

	err = nilfs_segctor_start_thread(NILFS_SC(sbi));
	if (err) {
		kfree(sbi->s_sc_info);
		sbi->s_sc_info = NULL;
	}
	return err;
}

/**
 * nilfs_detach_segment_constructor - destroy the segment constructor
 * @sbi: nilfs_sb_info
 *
 * nilfs_detach_segment_constructor() kills the segment constructor daemon,
 * frees the struct nilfs_sc_info, and destroys the dirty file list.
 */
void nilfs_detach_segment_constructor(struct nilfs_sb_info *sbi)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (NILFS_SC(sbi)) {
		nilfs_segctor_destroy(NILFS_SC(sbi));
		sbi->s_sc_info = NULL;
	}

	/* Force to free the list of dirty files */
	spin_lock(&sbi->s_inode_lock);
	if (!list_empty(&sbi->s_dirty_files)) {
		list_splice_init(&sbi->s_dirty_files, &garbage_list);
		nilfs_warning(sbi->s_super, __func__,
			      "Non-empty dirty list after the last "
			      "segment construction\n");
	}
	spin_unlock(&sbi->s_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(sbi, &garbage_list, 1);
}