journal.c 58 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
05520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249
  1. /* -*- mode: c; c-basic-offset: 8; -*-
  2. * vim: noexpandtab sw=8 ts=8 sts=0:
  3. *
  4. * journal.c
  5. *
  6. * Defines functions of journalling api
  7. *
  8. * Copyright (C) 2003, 2004 Oracle. All rights reserved.
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2 of the License, or (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public
  21. * License along with this program; if not, write to the
  22. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  23. * Boston, MA 021110-1307, USA.
  24. */
  25. #include <linux/fs.h>
  26. #include <linux/types.h>
  27. #include <linux/slab.h>
  28. #include <linux/highmem.h>
  29. #include <linux/kthread.h>
  30. #include <linux/time.h>
  31. #include <linux/random.h>
  32. #define MLOG_MASK_PREFIX ML_JOURNAL
  33. #include <cluster/masklog.h>
  34. #include "ocfs2.h"
  35. #include "alloc.h"
  36. #include "blockcheck.h"
  37. #include "dir.h"
  38. #include "dlmglue.h"
  39. #include "extent_map.h"
  40. #include "heartbeat.h"
  41. #include "inode.h"
  42. #include "journal.h"
  43. #include "localalloc.h"
  44. #include "slot_map.h"
  45. #include "super.h"
  46. #include "sysfile.h"
  47. #include "uptodate.h"
  48. #include "quota.h"
  49. #include "buffer_head_io.h"
  50. DEFINE_SPINLOCK(trans_inc_lock);
  51. #define ORPHAN_SCAN_SCHEDULE_TIMEOUT 300000
  52. static int ocfs2_force_read_journal(struct inode *inode);
  53. static int ocfs2_recover_node(struct ocfs2_super *osb,
  54. int node_num, int slot_num);
  55. static int __ocfs2_recovery_thread(void *arg);
  56. static int ocfs2_commit_cache(struct ocfs2_super *osb);
  57. static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota);
  58. static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
  59. int dirty, int replayed);
  60. static int ocfs2_trylock_journal(struct ocfs2_super *osb,
  61. int slot_num);
  62. static int ocfs2_recover_orphans(struct ocfs2_super *osb,
  63. int slot);
  64. static int ocfs2_commit_thread(void *arg);
  65. static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
  66. int slot_num,
  67. struct ocfs2_dinode *la_dinode,
  68. struct ocfs2_dinode *tl_dinode,
  69. struct ocfs2_quota_recovery *qrec);
  70. static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb)
  71. {
  72. return __ocfs2_wait_on_mount(osb, 0);
  73. }
  74. static inline int ocfs2_wait_on_quotas(struct ocfs2_super *osb)
  75. {
  76. return __ocfs2_wait_on_mount(osb, 1);
  77. }
  78. /*
  79. * This replay_map is to track online/offline slots, so we could recover
  80. * offline slots during recovery and mount
  81. */
  82. enum ocfs2_replay_state {
  83. REPLAY_UNNEEDED = 0, /* Replay is not needed, so ignore this map */
  84. REPLAY_NEEDED, /* Replay slots marked in rm_replay_slots */
  85. REPLAY_DONE /* Replay was already queued */
  86. };
  87. struct ocfs2_replay_map {
  88. unsigned int rm_slots;
  89. enum ocfs2_replay_state rm_state;
  90. unsigned char rm_replay_slots[0];
  91. };
  92. void ocfs2_replay_map_set_state(struct ocfs2_super *osb, int state)
  93. {
  94. if (!osb->replay_map)
  95. return;
  96. /* If we've already queued the replay, we don't have any more to do */
  97. if (osb->replay_map->rm_state == REPLAY_DONE)
  98. return;
  99. osb->replay_map->rm_state = state;
  100. }
  101. int ocfs2_compute_replay_slots(struct ocfs2_super *osb)
  102. {
  103. struct ocfs2_replay_map *replay_map;
  104. int i, node_num;
  105. /* If replay map is already set, we don't do it again */
  106. if (osb->replay_map)
  107. return 0;
  108. replay_map = kzalloc(sizeof(struct ocfs2_replay_map) +
  109. (osb->max_slots * sizeof(char)), GFP_KERNEL);
  110. if (!replay_map) {
  111. mlog_errno(-ENOMEM);
  112. return -ENOMEM;
  113. }
  114. spin_lock(&osb->osb_lock);
  115. replay_map->rm_slots = osb->max_slots;
  116. replay_map->rm_state = REPLAY_UNNEEDED;
  117. /* set rm_replay_slots for offline slot(s) */
  118. for (i = 0; i < replay_map->rm_slots; i++) {
  119. if (ocfs2_slot_to_node_num_locked(osb, i, &node_num) == -ENOENT)
  120. replay_map->rm_replay_slots[i] = 1;
  121. }
  122. osb->replay_map = replay_map;
  123. spin_unlock(&osb->osb_lock);
  124. return 0;
  125. }
  126. void ocfs2_queue_replay_slots(struct ocfs2_super *osb)
  127. {
  128. struct ocfs2_replay_map *replay_map = osb->replay_map;
  129. int i;
  130. if (!replay_map)
  131. return;
  132. if (replay_map->rm_state != REPLAY_NEEDED)
  133. return;
  134. for (i = 0; i < replay_map->rm_slots; i++)
  135. if (replay_map->rm_replay_slots[i])
  136. ocfs2_queue_recovery_completion(osb->journal, i, NULL,
  137. NULL, NULL);
  138. replay_map->rm_state = REPLAY_DONE;
  139. }
  140. void ocfs2_free_replay_slots(struct ocfs2_super *osb)
  141. {
  142. struct ocfs2_replay_map *replay_map = osb->replay_map;
  143. if (!osb->replay_map)
  144. return;
  145. kfree(replay_map);
  146. osb->replay_map = NULL;
  147. }
  148. int ocfs2_recovery_init(struct ocfs2_super *osb)
  149. {
  150. struct ocfs2_recovery_map *rm;
  151. mutex_init(&osb->recovery_lock);
  152. osb->disable_recovery = 0;
  153. osb->recovery_thread_task = NULL;
  154. init_waitqueue_head(&osb->recovery_event);
  155. rm = kzalloc(sizeof(struct ocfs2_recovery_map) +
  156. osb->max_slots * sizeof(unsigned int),
  157. GFP_KERNEL);
  158. if (!rm) {
  159. mlog_errno(-ENOMEM);
  160. return -ENOMEM;
  161. }
  162. rm->rm_entries = (unsigned int *)((char *)rm +
  163. sizeof(struct ocfs2_recovery_map));
  164. osb->recovery_map = rm;
  165. return 0;
  166. }
  167. /* we can't grab the goofy sem lock from inside wait_event, so we use
  168. * memory barriers to make sure that we'll see the null task before
  169. * being woken up */
  170. static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
  171. {
  172. mb();
  173. return osb->recovery_thread_task != NULL;
  174. }
  175. void ocfs2_recovery_exit(struct ocfs2_super *osb)
  176. {
  177. struct ocfs2_recovery_map *rm;
  178. /* disable any new recovery threads and wait for any currently
  179. * running ones to exit. Do this before setting the vol_state. */
  180. mutex_lock(&osb->recovery_lock);
  181. osb->disable_recovery = 1;
  182. mutex_unlock(&osb->recovery_lock);
  183. wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));
  184. /* At this point, we know that no more recovery threads can be
  185. * launched, so wait for any recovery completion work to
  186. * complete. */
  187. flush_workqueue(ocfs2_wq);
  188. /*
  189. * Now that recovery is shut down, and the osb is about to be
  190. * freed, the osb_lock is not taken here.
  191. */
  192. rm = osb->recovery_map;
  193. /* XXX: Should we bug if there are dirty entries? */
  194. kfree(rm);
  195. }
  196. static int __ocfs2_recovery_map_test(struct ocfs2_super *osb,
  197. unsigned int node_num)
  198. {
  199. int i;
  200. struct ocfs2_recovery_map *rm = osb->recovery_map;
  201. assert_spin_locked(&osb->osb_lock);
  202. for (i = 0; i < rm->rm_used; i++) {
  203. if (rm->rm_entries[i] == node_num)
  204. return 1;
  205. }
  206. return 0;
  207. }
  208. /* Behaves like test-and-set. Returns the previous value */
  209. static int ocfs2_recovery_map_set(struct ocfs2_super *osb,
  210. unsigned int node_num)
  211. {
  212. struct ocfs2_recovery_map *rm = osb->recovery_map;
  213. spin_lock(&osb->osb_lock);
  214. if (__ocfs2_recovery_map_test(osb, node_num)) {
  215. spin_unlock(&osb->osb_lock);
  216. return 1;
  217. }
  218. /* XXX: Can this be exploited? Not from o2dlm... */
  219. BUG_ON(rm->rm_used >= osb->max_slots);
  220. rm->rm_entries[rm->rm_used] = node_num;
  221. rm->rm_used++;
  222. spin_unlock(&osb->osb_lock);
  223. return 0;
  224. }
  225. static void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
  226. unsigned int node_num)
  227. {
  228. int i;
  229. struct ocfs2_recovery_map *rm = osb->recovery_map;
  230. spin_lock(&osb->osb_lock);
  231. for (i = 0; i < rm->rm_used; i++) {
  232. if (rm->rm_entries[i] == node_num)
  233. break;
  234. }
  235. if (i < rm->rm_used) {
  236. /* XXX: be careful with the pointer math */
  237. memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]),
  238. (rm->rm_used - i - 1) * sizeof(unsigned int));
  239. rm->rm_used--;
  240. }
  241. spin_unlock(&osb->osb_lock);
  242. }
  243. static int ocfs2_commit_cache(struct ocfs2_super *osb)
  244. {
  245. int status = 0;
  246. unsigned int flushed;
  247. struct ocfs2_journal *journal = NULL;
  248. mlog_entry_void();
  249. journal = osb->journal;
  250. /* Flush all pending commits and checkpoint the journal. */
  251. down_write(&journal->j_trans_barrier);
  252. if (atomic_read(&journal->j_num_trans) == 0) {
  253. up_write(&journal->j_trans_barrier);
  254. mlog(0, "No transactions for me to flush!\n");
  255. goto finally;
  256. }
  257. jbd2_journal_lock_updates(journal->j_journal);
  258. status = jbd2_journal_flush(journal->j_journal);
  259. jbd2_journal_unlock_updates(journal->j_journal);
  260. if (status < 0) {
  261. up_write(&journal->j_trans_barrier);
  262. mlog_errno(status);
  263. goto finally;
  264. }
  265. ocfs2_inc_trans_id(journal);
  266. flushed = atomic_read(&journal->j_num_trans);
  267. atomic_set(&journal->j_num_trans, 0);
  268. up_write(&journal->j_trans_barrier);
  269. mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n",
  270. journal->j_trans_id, flushed);
  271. ocfs2_wake_downconvert_thread(osb);
  272. wake_up(&journal->j_checkpointed);
  273. finally:
  274. mlog_exit(status);
  275. return status;
  276. }
  277. handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
  278. {
  279. journal_t *journal = osb->journal->j_journal;
  280. handle_t *handle;
  281. BUG_ON(!osb || !osb->journal->j_journal);
  282. if (ocfs2_is_hard_readonly(osb))
  283. return ERR_PTR(-EROFS);
  284. BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE);
  285. BUG_ON(max_buffs <= 0);
  286. /* Nested transaction? Just return the handle... */
  287. if (journal_current_handle())
  288. return jbd2_journal_start(journal, max_buffs);
  289. down_read(&osb->journal->j_trans_barrier);
  290. handle = jbd2_journal_start(journal, max_buffs);
  291. if (IS_ERR(handle)) {
  292. up_read(&osb->journal->j_trans_barrier);
  293. mlog_errno(PTR_ERR(handle));
  294. if (is_journal_aborted(journal)) {
  295. ocfs2_abort(osb->sb, "Detected aborted journal");
  296. handle = ERR_PTR(-EROFS);
  297. }
  298. } else {
  299. if (!ocfs2_mount_local(osb))
  300. atomic_inc(&(osb->journal->j_num_trans));
  301. }
  302. return handle;
  303. }
  304. int ocfs2_commit_trans(struct ocfs2_super *osb,
  305. handle_t *handle)
  306. {
  307. int ret, nested;
  308. struct ocfs2_journal *journal = osb->journal;
  309. BUG_ON(!handle);
  310. nested = handle->h_ref > 1;
  311. ret = jbd2_journal_stop(handle);
  312. if (ret < 0)
  313. mlog_errno(ret);
  314. if (!nested)
  315. up_read(&journal->j_trans_barrier);
  316. return ret;
  317. }
  318. /*
  319. * 'nblocks' is what you want to add to the current transaction.
  320. *
  321. * This might call jbd2_journal_restart() which will commit dirty buffers
  322. * and then restart the transaction. Before calling
  323. * ocfs2_extend_trans(), any changed blocks should have been
  324. * dirtied. After calling it, all blocks which need to be changed must
  325. * go through another set of journal_access/journal_dirty calls.
  326. *
  327. * WARNING: This will not release any semaphores or disk locks taken
  328. * during the transaction, so make sure they were taken *before*
  329. * start_trans or we'll have ordering deadlocks.
  330. *
  331. * WARNING2: Note that we do *not* drop j_trans_barrier here. This is
  332. * good because transaction ids haven't yet been recorded on the
  333. * cluster locks associated with this handle.
  334. */
  335. int ocfs2_extend_trans(handle_t *handle, int nblocks)
  336. {
  337. int status, old_nblocks;
  338. BUG_ON(!handle);
  339. BUG_ON(nblocks < 0);
  340. if (!nblocks)
  341. return 0;
  342. old_nblocks = handle->h_buffer_credits;
  343. mlog_entry_void();
  344. mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);
  345. #ifdef CONFIG_OCFS2_DEBUG_FS
  346. status = 1;
  347. #else
  348. status = jbd2_journal_extend(handle, nblocks);
  349. if (status < 0) {
  350. mlog_errno(status);
  351. goto bail;
  352. }
  353. #endif
  354. if (status > 0) {
  355. mlog(0,
  356. "jbd2_journal_extend failed, trying "
  357. "jbd2_journal_restart\n");
  358. status = jbd2_journal_restart(handle,
  359. old_nblocks + nblocks);
  360. if (status < 0) {
  361. mlog_errno(status);
  362. goto bail;
  363. }
  364. }
  365. status = 0;
  366. bail:
  367. mlog_exit(status);
  368. return status;
  369. }
  370. struct ocfs2_triggers {
  371. struct jbd2_buffer_trigger_type ot_triggers;
  372. int ot_offset;
  373. };
  374. static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers)
  375. {
  376. return container_of(triggers, struct ocfs2_triggers, ot_triggers);
  377. }
  378. static void ocfs2_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
  379. struct buffer_head *bh,
  380. void *data, size_t size)
  381. {
  382. struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);
  383. /*
  384. * We aren't guaranteed to have the superblock here, so we
  385. * must unconditionally compute the ecc data.
  386. * __ocfs2_journal_access() will only set the triggers if
  387. * metaecc is enabled.
  388. */
  389. ocfs2_block_check_compute(data, size, data + ot->ot_offset);
  390. }
  391. /*
  392. * Quota blocks have their own trigger because the struct ocfs2_block_check
  393. * offset depends on the blocksize.
  394. */
  395. static void ocfs2_dq_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
  396. struct buffer_head *bh,
  397. void *data, size_t size)
  398. {
  399. struct ocfs2_disk_dqtrailer *dqt =
  400. ocfs2_block_dqtrailer(size, data);
  401. /*
  402. * We aren't guaranteed to have the superblock here, so we
  403. * must unconditionally compute the ecc data.
  404. * __ocfs2_journal_access() will only set the triggers if
  405. * metaecc is enabled.
  406. */
  407. ocfs2_block_check_compute(data, size, &dqt->dq_check);
  408. }
  409. /*
  410. * Directory blocks also have their own trigger because the
  411. * struct ocfs2_block_check offset depends on the blocksize.
  412. */
  413. static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
  414. struct buffer_head *bh,
  415. void *data, size_t size)
  416. {
  417. struct ocfs2_dir_block_trailer *trailer =
  418. ocfs2_dir_trailer_from_size(size, data);
  419. /*
  420. * We aren't guaranteed to have the superblock here, so we
  421. * must unconditionally compute the ecc data.
  422. * __ocfs2_journal_access() will only set the triggers if
  423. * metaecc is enabled.
  424. */
  425. ocfs2_block_check_compute(data, size, &trailer->db_check);
  426. }
  427. static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
  428. struct buffer_head *bh)
  429. {
  430. mlog(ML_ERROR,
  431. "ocfs2_abort_trigger called by JBD2. bh = 0x%lx, "
  432. "bh->b_blocknr = %llu\n",
  433. (unsigned long)bh,
  434. (unsigned long long)bh->b_blocknr);
  435. /* We aren't guaranteed to have the superblock here - but if we
  436. * don't, it'll just crash. */
  437. ocfs2_error(bh->b_assoc_map->host->i_sb,
  438. "JBD2 has aborted our journal, ocfs2 cannot continue\n");
  439. }
  440. static struct ocfs2_triggers di_triggers = {
  441. .ot_triggers = {
  442. .t_frozen = ocfs2_frozen_trigger,
  443. .t_abort = ocfs2_abort_trigger,
  444. },
  445. .ot_offset = offsetof(struct ocfs2_dinode, i_check),
  446. };
  447. static struct ocfs2_triggers eb_triggers = {
  448. .ot_triggers = {
  449. .t_frozen = ocfs2_frozen_trigger,
  450. .t_abort = ocfs2_abort_trigger,
  451. },
  452. .ot_offset = offsetof(struct ocfs2_extent_block, h_check),
  453. };
  454. static struct ocfs2_triggers rb_triggers = {
  455. .ot_triggers = {
  456. .t_frozen = ocfs2_frozen_trigger,
  457. .t_abort = ocfs2_abort_trigger,
  458. },
  459. .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check),
  460. };
  461. static struct ocfs2_triggers gd_triggers = {
  462. .ot_triggers = {
  463. .t_frozen = ocfs2_frozen_trigger,
  464. .t_abort = ocfs2_abort_trigger,
  465. },
  466. .ot_offset = offsetof(struct ocfs2_group_desc, bg_check),
  467. };
  468. static struct ocfs2_triggers db_triggers = {
  469. .ot_triggers = {
  470. .t_frozen = ocfs2_db_frozen_trigger,
  471. .t_abort = ocfs2_abort_trigger,
  472. },
  473. };
  474. static struct ocfs2_triggers xb_triggers = {
  475. .ot_triggers = {
  476. .t_frozen = ocfs2_frozen_trigger,
  477. .t_abort = ocfs2_abort_trigger,
  478. },
  479. .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check),
  480. };
  481. static struct ocfs2_triggers dq_triggers = {
  482. .ot_triggers = {
  483. .t_frozen = ocfs2_dq_frozen_trigger,
  484. .t_abort = ocfs2_abort_trigger,
  485. },
  486. };
  487. static struct ocfs2_triggers dr_triggers = {
  488. .ot_triggers = {
  489. .t_frozen = ocfs2_frozen_trigger,
  490. .t_abort = ocfs2_abort_trigger,
  491. },
  492. .ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check),
  493. };
  494. static struct ocfs2_triggers dl_triggers = {
  495. .ot_triggers = {
  496. .t_frozen = ocfs2_frozen_trigger,
  497. .t_abort = ocfs2_abort_trigger,
  498. },
  499. .ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check),
  500. };
  501. static int __ocfs2_journal_access(handle_t *handle,
  502. struct ocfs2_caching_info *ci,
  503. struct buffer_head *bh,
  504. struct ocfs2_triggers *triggers,
  505. int type)
  506. {
  507. int status;
  508. struct ocfs2_super *osb =
  509. OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
  510. BUG_ON(!ci || !ci->ci_ops);
  511. BUG_ON(!handle);
  512. BUG_ON(!bh);
  513. mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
  514. (unsigned long long)bh->b_blocknr, type,
  515. (type == OCFS2_JOURNAL_ACCESS_CREATE) ?
  516. "OCFS2_JOURNAL_ACCESS_CREATE" :
  517. "OCFS2_JOURNAL_ACCESS_WRITE",
  518. bh->b_size);
  519. /* we can safely remove this assertion after testing. */
  520. if (!buffer_uptodate(bh)) {
  521. mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
  522. mlog(ML_ERROR, "b_blocknr=%llu\n",
  523. (unsigned long long)bh->b_blocknr);
  524. BUG();
  525. }
  526. /* Set the current transaction information on the ci so
  527. * that the locking code knows whether it can drop it's locks
  528. * on this ci or not. We're protected from the commit
  529. * thread updating the current transaction id until
  530. * ocfs2_commit_trans() because ocfs2_start_trans() took
  531. * j_trans_barrier for us. */
  532. ocfs2_set_ci_lock_trans(osb->journal, ci);
  533. ocfs2_metadata_cache_io_lock(ci);
  534. switch (type) {
  535. case OCFS2_JOURNAL_ACCESS_CREATE:
  536. case OCFS2_JOURNAL_ACCESS_WRITE:
  537. status = jbd2_journal_get_write_access(handle, bh);
  538. break;
  539. case OCFS2_JOURNAL_ACCESS_UNDO:
  540. status = jbd2_journal_get_undo_access(handle, bh);
  541. break;
  542. default:
  543. status = -EINVAL;
  544. mlog(ML_ERROR, "Unknown access type!\n");
  545. }
  546. if (!status && ocfs2_meta_ecc(osb) && triggers)
  547. jbd2_journal_set_triggers(bh, &triggers->ot_triggers);
  548. ocfs2_metadata_cache_io_unlock(ci);
  549. if (status < 0)
  550. mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
  551. status, type);
  552. mlog_exit(status);
  553. return status;
  554. }
  555. int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
  556. struct buffer_head *bh, int type)
  557. {
  558. return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type);
  559. }
  560. int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
  561. struct buffer_head *bh, int type)
  562. {
  563. return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type);
  564. }
  565. int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
  566. struct buffer_head *bh, int type)
  567. {
  568. return __ocfs2_journal_access(handle, ci, bh, &rb_triggers,
  569. type);
  570. }
  571. int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
  572. struct buffer_head *bh, int type)
  573. {
  574. return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type);
  575. }
  576. int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
  577. struct buffer_head *bh, int type)
  578. {
  579. return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type);
  580. }
  581. int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
  582. struct buffer_head *bh, int type)
  583. {
  584. return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type);
  585. }
  586. int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
  587. struct buffer_head *bh, int type)
  588. {
  589. return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type);
  590. }
  591. int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
  592. struct buffer_head *bh, int type)
  593. {
  594. return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type);
  595. }
  596. int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
  597. struct buffer_head *bh, int type)
  598. {
  599. return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type);
  600. }
  601. int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
  602. struct buffer_head *bh, int type)
  603. {
  604. return __ocfs2_journal_access(handle, ci, bh, NULL, type);
  605. }
  606. void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
  607. {
  608. int status;
  609. mlog_entry("(bh->b_blocknr=%llu)\n",
  610. (unsigned long long)bh->b_blocknr);
  611. status = jbd2_journal_dirty_metadata(handle, bh);
  612. BUG_ON(status);
  613. mlog_exit_void();
  614. }
  615. #define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)
  616. void ocfs2_set_journal_params(struct ocfs2_super *osb)
  617. {
  618. journal_t *journal = osb->journal->j_journal;
  619. unsigned long commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL;
  620. if (osb->osb_commit_interval)
  621. commit_interval = osb->osb_commit_interval;
  622. write_lock(&journal->j_state_lock);
  623. journal->j_commit_interval = commit_interval;
  624. if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
  625. journal->j_flags |= JBD2_BARRIER;
  626. else
  627. journal->j_flags &= ~JBD2_BARRIER;
  628. write_unlock(&journal->j_state_lock);
  629. }
  630. int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
  631. {
  632. int status = -1;
  633. struct inode *inode = NULL; /* the journal inode */
  634. journal_t *j_journal = NULL;
  635. struct ocfs2_dinode *di = NULL;
  636. struct buffer_head *bh = NULL;
  637. struct ocfs2_super *osb;
  638. int inode_lock = 0;
  639. mlog_entry_void();
  640. BUG_ON(!journal);
  641. osb = journal->j_osb;
  642. /* already have the inode for our journal */
  643. inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
  644. osb->slot_num);
  645. if (inode == NULL) {
  646. status = -EACCES;
  647. mlog_errno(status);
  648. goto done;
  649. }
  650. if (is_bad_inode(inode)) {
  651. mlog(ML_ERROR, "access error (bad inode)\n");
  652. iput(inode);
  653. inode = NULL;
  654. status = -EACCES;
  655. goto done;
  656. }
  657. SET_INODE_JOURNAL(inode);
  658. OCFS2_I(inode)->ip_open_count++;
  659. /* Skip recovery waits here - journal inode metadata never
  660. * changes in a live cluster so it can be considered an
  661. * exception to the rule. */
  662. status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
  663. if (status < 0) {
  664. if (status != -ERESTARTSYS)
  665. mlog(ML_ERROR, "Could not get lock on journal!\n");
  666. goto done;
  667. }
  668. inode_lock = 1;
  669. di = (struct ocfs2_dinode *)bh->b_data;
  670. if (inode->i_size < OCFS2_MIN_JOURNAL_SIZE) {
  671. mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
  672. inode->i_size);
  673. status = -EINVAL;
  674. goto done;
  675. }
  676. mlog(0, "inode->i_size = %lld\n", inode->i_size);
  677. mlog(0, "inode->i_blocks = %llu\n",
  678. (unsigned long long)inode->i_blocks);
  679. mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);
  680. /* call the kernels journal init function now */
  681. j_journal = jbd2_journal_init_inode(inode);
  682. if (j_journal == NULL) {
  683. mlog(ML_ERROR, "Linux journal layer error\n");
  684. status = -EINVAL;
  685. goto done;
  686. }
  687. mlog(0, "Returned from jbd2_journal_init_inode\n");
  688. mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen);
  689. *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
  690. OCFS2_JOURNAL_DIRTY_FL);
  691. journal->j_journal = j_journal;
  692. journal->j_inode = inode;
  693. journal->j_bh = bh;
  694. ocfs2_set_journal_params(osb);
  695. journal->j_state = OCFS2_JOURNAL_LOADED;
  696. status = 0;
  697. done:
  698. if (status < 0) {
  699. if (inode_lock)
  700. ocfs2_inode_unlock(inode, 1);
  701. brelse(bh);
  702. if (inode) {
  703. OCFS2_I(inode)->ip_open_count--;
  704. iput(inode);
  705. }
  706. }
  707. mlog_exit(status);
  708. return status;
  709. }
  710. static void ocfs2_bump_recovery_generation(struct ocfs2_dinode *di)
  711. {
  712. le32_add_cpu(&(di->id1.journal1.ij_recovery_generation), 1);
  713. }
  714. static u32 ocfs2_get_recovery_generation(struct ocfs2_dinode *di)
  715. {
  716. return le32_to_cpu(di->id1.journal1.ij_recovery_generation);
  717. }
  718. static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
  719. int dirty, int replayed)
  720. {
  721. int status;
  722. unsigned int flags;
  723. struct ocfs2_journal *journal = osb->journal;
  724. struct buffer_head *bh = journal->j_bh;
  725. struct ocfs2_dinode *fe;
  726. mlog_entry_void();
  727. fe = (struct ocfs2_dinode *)bh->b_data;
  728. /* The journal bh on the osb always comes from ocfs2_journal_init()
  729. * and was validated there inside ocfs2_inode_lock_full(). It's a
  730. * code bug if we mess it up. */
  731. BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
  732. flags = le32_to_cpu(fe->id1.journal1.ij_flags);
  733. if (dirty)
  734. flags |= OCFS2_JOURNAL_DIRTY_FL;
  735. else
  736. flags &= ~OCFS2_JOURNAL_DIRTY_FL;
  737. fe->id1.journal1.ij_flags = cpu_to_le32(flags);
  738. if (replayed)
  739. ocfs2_bump_recovery_generation(fe);
  740. ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
  741. status = ocfs2_write_block(osb, bh, INODE_CACHE(journal->j_inode));
  742. if (status < 0)
  743. mlog_errno(status);
  744. mlog_exit(status);
  745. return status;
  746. }
  747. /*
  748. * If the journal has been kmalloc'd it needs to be freed after this
  749. * call.
  750. */
  751. void ocfs2_journal_shutdown(struct ocfs2_super *osb)
  752. {
  753. struct ocfs2_journal *journal = NULL;
  754. int status = 0;
  755. struct inode *inode = NULL;
  756. int num_running_trans = 0;
  757. mlog_entry_void();
  758. BUG_ON(!osb);
  759. journal = osb->journal;
  760. if (!journal)
  761. goto done;
  762. inode = journal->j_inode;
  763. if (journal->j_state != OCFS2_JOURNAL_LOADED)
  764. goto done;
  765. /* need to inc inode use count - jbd2_journal_destroy will iput. */
  766. if (!igrab(inode))
  767. BUG();
  768. num_running_trans = atomic_read(&(osb->journal->j_num_trans));
  769. if (num_running_trans > 0)
  770. mlog(0, "Shutting down journal: must wait on %d "
  771. "running transactions!\n",
  772. num_running_trans);
  773. /* Do a commit_cache here. It will flush our journal, *and*
  774. * release any locks that are still held.
  775. * set the SHUTDOWN flag and release the trans lock.
  776. * the commit thread will take the trans lock for us below. */
  777. journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN;
  778. /* The OCFS2_JOURNAL_IN_SHUTDOWN will signal to commit_cache to not
  779. * drop the trans_lock (which we want to hold until we
  780. * completely destroy the journal. */
  781. if (osb->commit_task) {
  782. /* Wait for the commit thread */
  783. mlog(0, "Waiting for ocfs2commit to exit....\n");
  784. kthread_stop(osb->commit_task);
  785. osb->commit_task = NULL;
  786. }
  787. BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);
  788. if (ocfs2_mount_local(osb)) {
  789. jbd2_journal_lock_updates(journal->j_journal);
  790. status = jbd2_journal_flush(journal->j_journal);
  791. jbd2_journal_unlock_updates(journal->j_journal);
  792. if (status < 0)
  793. mlog_errno(status);
  794. }
  795. if (status == 0) {
  796. /*
  797. * Do not toggle if flush was unsuccessful otherwise
  798. * will leave dirty metadata in a "clean" journal
  799. */
  800. status = ocfs2_journal_toggle_dirty(osb, 0, 0);
  801. if (status < 0)
  802. mlog_errno(status);
  803. }
  804. /* Shutdown the kernel journal system */
  805. jbd2_journal_destroy(journal->j_journal);
  806. journal->j_journal = NULL;
  807. OCFS2_I(inode)->ip_open_count--;
  808. /* unlock our journal */
  809. ocfs2_inode_unlock(inode, 1);
  810. brelse(journal->j_bh);
  811. journal->j_bh = NULL;
  812. journal->j_state = OCFS2_JOURNAL_FREE;
  813. // up_write(&journal->j_trans_barrier);
  814. done:
  815. if (inode)
  816. iput(inode);
  817. mlog_exit_void();
  818. }
  819. static void ocfs2_clear_journal_error(struct super_block *sb,
  820. journal_t *journal,
  821. int slot)
  822. {
  823. int olderr;
  824. olderr = jbd2_journal_errno(journal);
  825. if (olderr) {
  826. mlog(ML_ERROR, "File system error %d recorded in "
  827. "journal %u.\n", olderr, slot);
  828. mlog(ML_ERROR, "File system on device %s needs checking.\n",
  829. sb->s_id);
  830. jbd2_journal_ack_err(journal);
  831. jbd2_journal_clear_err(journal);
  832. }
  833. }
  834. int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
  835. {
  836. int status = 0;
  837. struct ocfs2_super *osb;
  838. mlog_entry_void();
  839. BUG_ON(!journal);
  840. osb = journal->j_osb;
  841. status = jbd2_journal_load(journal->j_journal);
  842. if (status < 0) {
  843. mlog(ML_ERROR, "Failed to load journal!\n");
  844. goto done;
  845. }
  846. ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);
  847. status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
  848. if (status < 0) {
  849. mlog_errno(status);
  850. goto done;
  851. }
  852. /* Launch the commit thread */
  853. if (!local) {
  854. osb->commit_task = kthread_run(ocfs2_commit_thread, osb,
  855. "ocfs2cmt");
  856. if (IS_ERR(osb->commit_task)) {
  857. status = PTR_ERR(osb->commit_task);
  858. osb->commit_task = NULL;
  859. mlog(ML_ERROR, "unable to launch ocfs2commit thread, "
  860. "error=%d", status);
  861. goto done;
  862. }
  863. } else
  864. osb->commit_task = NULL;
  865. done:
  866. mlog_exit(status);
  867. return status;
  868. }
  869. /* 'full' flag tells us whether we clear out all blocks or if we just
  870. * mark the journal clean */
  871. int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
  872. {
  873. int status;
  874. mlog_entry_void();
  875. BUG_ON(!journal);
  876. status = jbd2_journal_wipe(journal->j_journal, full);
  877. if (status < 0) {
  878. mlog_errno(status);
  879. goto bail;
  880. }
  881. status = ocfs2_journal_toggle_dirty(journal->j_osb, 0, 0);
  882. if (status < 0)
  883. mlog_errno(status);
  884. bail:
  885. mlog_exit(status);
  886. return status;
  887. }
  888. static int ocfs2_recovery_completed(struct ocfs2_super *osb)
  889. {
  890. int empty;
  891. struct ocfs2_recovery_map *rm = osb->recovery_map;
  892. spin_lock(&osb->osb_lock);
  893. empty = (rm->rm_used == 0);
  894. spin_unlock(&osb->osb_lock);
  895. return empty;
  896. }
  897. void ocfs2_wait_for_recovery(struct ocfs2_super *osb)
  898. {
  899. wait_event(osb->recovery_event, ocfs2_recovery_completed(osb));
  900. }
  901. /*
  902. * JBD Might read a cached version of another nodes journal file. We
  903. * don't want this as this file changes often and we get no
  904. * notification on those changes. The only way to be sure that we've
  905. * got the most up to date version of those blocks then is to force
  906. * read them off disk. Just searching through the buffer cache won't
  907. * work as there may be pages backing this file which are still marked
  908. * up to date. We know things can't change on this file underneath us
  909. * as we have the lock by now :)
  910. */
  911. static int ocfs2_force_read_journal(struct inode *inode)
  912. {
  913. int status = 0;
  914. int i;
  915. u64 v_blkno, p_blkno, p_blocks, num_blocks;
  916. #define CONCURRENT_JOURNAL_FILL 32ULL
  917. struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];
  918. mlog_entry_void();
  919. memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
  920. num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size);
  921. v_blkno = 0;
  922. while (v_blkno < num_blocks) {
  923. status = ocfs2_extent_map_get_blocks(inode, v_blkno,
  924. &p_blkno, &p_blocks, NULL);
  925. if (status < 0) {
  926. mlog_errno(status);
  927. goto bail;
  928. }
  929. if (p_blocks > CONCURRENT_JOURNAL_FILL)
  930. p_blocks = CONCURRENT_JOURNAL_FILL;
  931. /* We are reading journal data which should not
  932. * be put in the uptodate cache */
  933. status = ocfs2_read_blocks_sync(OCFS2_SB(inode->i_sb),
  934. p_blkno, p_blocks, bhs);
  935. if (status < 0) {
  936. mlog_errno(status);
  937. goto bail;
  938. }
  939. for(i = 0; i < p_blocks; i++) {
  940. brelse(bhs[i]);
  941. bhs[i] = NULL;
  942. }
  943. v_blkno += p_blocks;
  944. }
  945. bail:
  946. for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++)
  947. brelse(bhs[i]);
  948. mlog_exit(status);
  949. return status;
  950. }
  951. struct ocfs2_la_recovery_item {
  952. struct list_head lri_list;
  953. int lri_slot;
  954. struct ocfs2_dinode *lri_la_dinode;
  955. struct ocfs2_dinode *lri_tl_dinode;
  956. struct ocfs2_quota_recovery *lri_qrec;
  957. };
  958. /* Does the second half of the recovery process. By this point, the
  959. * node is marked clean and can actually be considered recovered,
  960. * hence it's no longer in the recovery map, but there's still some
  961. * cleanup we can do which shouldn't happen within the recovery thread
  962. * as locking in that context becomes very difficult if we are to take
  963. * recovering nodes into account.
  964. *
  965. * NOTE: This function can and will sleep on recovery of other nodes
  966. * during cluster locking, just like any other ocfs2 process.
  967. */
  968. void ocfs2_complete_recovery(struct work_struct *work)
  969. {
  970. int ret;
  971. struct ocfs2_journal *journal =
  972. container_of(work, struct ocfs2_journal, j_recovery_work);
  973. struct ocfs2_super *osb = journal->j_osb;
  974. struct ocfs2_dinode *la_dinode, *tl_dinode;
  975. struct ocfs2_la_recovery_item *item, *n;
  976. struct ocfs2_quota_recovery *qrec;
  977. LIST_HEAD(tmp_la_list);
  978. mlog_entry_void();
  979. mlog(0, "completing recovery from keventd\n");
  980. spin_lock(&journal->j_lock);
  981. list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
  982. spin_unlock(&journal->j_lock);
  983. list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
  984. list_del_init(&item->lri_list);
  985. mlog(0, "Complete recovery for slot %d\n", item->lri_slot);
  986. ocfs2_wait_on_quotas(osb);
  987. la_dinode = item->lri_la_dinode;
  988. if (la_dinode) {
  989. mlog(0, "Clean up local alloc %llu\n",
  990. (unsigned long long)le64_to_cpu(la_dinode->i_blkno));
  991. ret = ocfs2_complete_local_alloc_recovery(osb,
  992. la_dinode);
  993. if (ret < 0)
  994. mlog_errno(ret);
  995. kfree(la_dinode);
  996. }
  997. tl_dinode = item->lri_tl_dinode;
  998. if (tl_dinode) {
  999. mlog(0, "Clean up truncate log %llu\n",
  1000. (unsigned long long)le64_to_cpu(tl_dinode->i_blkno));
  1001. ret = ocfs2_complete_truncate_log_recovery(osb,
  1002. tl_dinode);
  1003. if (ret < 0)
  1004. mlog_errno(ret);
  1005. kfree(tl_dinode);
  1006. }
  1007. ret = ocfs2_recover_orphans(osb, item->lri_slot);
  1008. if (ret < 0)
  1009. mlog_errno(ret);
  1010. qrec = item->lri_qrec;
  1011. if (qrec) {
  1012. mlog(0, "Recovering quota files");
  1013. ret = ocfs2_finish_quota_recovery(osb, qrec,
  1014. item->lri_slot);
  1015. if (ret < 0)
  1016. mlog_errno(ret);
  1017. /* Recovery info is already freed now */
  1018. }
  1019. kfree(item);
  1020. }
  1021. mlog(0, "Recovery completion\n");
  1022. mlog_exit_void();
  1023. }
  1024. /* NOTE: This function always eats your references to la_dinode and
  1025. * tl_dinode, either manually on error, or by passing them to
  1026. * ocfs2_complete_recovery */
  1027. static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
  1028. int slot_num,
  1029. struct ocfs2_dinode *la_dinode,
  1030. struct ocfs2_dinode *tl_dinode,
  1031. struct ocfs2_quota_recovery *qrec)
  1032. {
  1033. struct ocfs2_la_recovery_item *item;
  1034. item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
  1035. if (!item) {
  1036. /* Though we wish to avoid it, we are in fact safe in
  1037. * skipping local alloc cleanup as fsck.ocfs2 is more
  1038. * than capable of reclaiming unused space. */
  1039. if (la_dinode)
  1040. kfree(la_dinode);
  1041. if (tl_dinode)
  1042. kfree(tl_dinode);
  1043. if (qrec)
  1044. ocfs2_free_quota_recovery(qrec);
  1045. mlog_errno(-ENOMEM);
  1046. return;
  1047. }
  1048. INIT_LIST_HEAD(&item->lri_list);
  1049. item->lri_la_dinode = la_dinode;
  1050. item->lri_slot = slot_num;
  1051. item->lri_tl_dinode = tl_dinode;
  1052. item->lri_qrec = qrec;
  1053. spin_lock(&journal->j_lock);
  1054. list_add_tail(&item->lri_list, &journal->j_la_cleanups);
  1055. queue_work(ocfs2_wq, &journal->j_recovery_work);
  1056. spin_unlock(&journal->j_lock);
  1057. }
  1058. /* Called by the mount code to queue recovery the last part of
  1059. * recovery for it's own and offline slot(s). */
  1060. void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
  1061. {
  1062. struct ocfs2_journal *journal = osb->journal;
  1063. /* No need to queue up our truncate_log as regular cleanup will catch
  1064. * that */
  1065. ocfs2_queue_recovery_completion(journal, osb->slot_num,
  1066. osb->local_alloc_copy, NULL, NULL);
  1067. ocfs2_schedule_truncate_log_flush(osb, 0);
  1068. osb->local_alloc_copy = NULL;
  1069. osb->dirty = 0;
  1070. /* queue to recover orphan slots for all offline slots */
  1071. ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
  1072. ocfs2_queue_replay_slots(osb);
  1073. ocfs2_free_replay_slots(osb);
  1074. }
  1075. void ocfs2_complete_quota_recovery(struct ocfs2_super *osb)
  1076. {
  1077. if (osb->quota_rec) {
  1078. ocfs2_queue_recovery_completion(osb->journal,
  1079. osb->slot_num,
  1080. NULL,
  1081. NULL,
  1082. osb->quota_rec);
  1083. osb->quota_rec = NULL;
  1084. }
  1085. }
  1086. static int __ocfs2_recovery_thread(void *arg)
  1087. {
  1088. int status, node_num, slot_num;
  1089. struct ocfs2_super *osb = arg;
  1090. struct ocfs2_recovery_map *rm = osb->recovery_map;
  1091. int *rm_quota = NULL;
  1092. int rm_quota_used = 0, i;
  1093. struct ocfs2_quota_recovery *qrec;
  1094. mlog_entry_void();
  1095. status = ocfs2_wait_on_mount(osb);
  1096. if (status < 0) {
  1097. goto bail;
  1098. }
  1099. rm_quota = kzalloc(osb->max_slots * sizeof(int), GFP_NOFS);
  1100. if (!rm_quota) {
  1101. status = -ENOMEM;
  1102. goto bail;
  1103. }
  1104. restart:
  1105. status = ocfs2_super_lock(osb, 1);
  1106. if (status < 0) {
  1107. mlog_errno(status);
  1108. goto bail;
  1109. }
  1110. status = ocfs2_compute_replay_slots(osb);
  1111. if (status < 0)
  1112. mlog_errno(status);
  1113. /* queue recovery for our own slot */
  1114. ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL,
  1115. NULL, NULL);
  1116. spin_lock(&osb->osb_lock);
  1117. while (rm->rm_used) {
  1118. /* It's always safe to remove entry zero, as we won't
  1119. * clear it until ocfs2_recover_node() has succeeded. */
  1120. node_num = rm->rm_entries[0];
  1121. spin_unlock(&osb->osb_lock);
  1122. mlog(0, "checking node %d\n", node_num);
  1123. slot_num = ocfs2_node_num_to_slot(osb, node_num);
  1124. if (slot_num == -ENOENT) {
  1125. status = 0;
  1126. mlog(0, "no slot for this node, so no recovery"
  1127. "required.\n");
  1128. goto skip_recovery;
  1129. }
  1130. mlog(0, "node %d was using slot %d\n", node_num, slot_num);
  1131. /* It is a bit subtle with quota recovery. We cannot do it
  1132. * immediately because we have to obtain cluster locks from
  1133. * quota files and we also don't want to just skip it because
  1134. * then quota usage would be out of sync until some node takes
  1135. * the slot. So we remember which nodes need quota recovery
  1136. * and when everything else is done, we recover quotas. */
  1137. for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++);
  1138. if (i == rm_quota_used)
  1139. rm_quota[rm_quota_used++] = slot_num;
  1140. status = ocfs2_recover_node(osb, node_num, slot_num);
  1141. skip_recovery:
  1142. if (!status) {
  1143. ocfs2_recovery_map_clear(osb, node_num);
  1144. } else {
  1145. mlog(ML_ERROR,
  1146. "Error %d recovering node %d on device (%u,%u)!\n",
  1147. status, node_num,
  1148. MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
  1149. mlog(ML_ERROR, "Volume requires unmount.\n");
  1150. }
  1151. spin_lock(&osb->osb_lock);
  1152. }
  1153. spin_unlock(&osb->osb_lock);
  1154. mlog(0, "All nodes recovered\n");
  1155. /* Refresh all journal recovery generations from disk */
  1156. status = ocfs2_check_journals_nolocks(osb);
  1157. status = (status == -EROFS) ? 0 : status;
  1158. if (status < 0)
  1159. mlog_errno(status);
  1160. /* Now it is right time to recover quotas... We have to do this under
  1161. * superblock lock so that noone can start using the slot (and crash)
  1162. * before we recover it */
  1163. for (i = 0; i < rm_quota_used; i++) {
  1164. qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
  1165. if (IS_ERR(qrec)) {
  1166. status = PTR_ERR(qrec);
  1167. mlog_errno(status);
  1168. continue;
  1169. }
  1170. ocfs2_queue_recovery_completion(osb->journal, rm_quota[i],
  1171. NULL, NULL, qrec);
  1172. }
  1173. ocfs2_super_unlock(osb, 1);
  1174. /* queue recovery for offline slots */
  1175. ocfs2_queue_replay_slots(osb);
  1176. bail:
  1177. mutex_lock(&osb->recovery_lock);
  1178. if (!status && !ocfs2_recovery_completed(osb)) {
  1179. mutex_unlock(&osb->recovery_lock);
  1180. goto restart;
  1181. }
  1182. ocfs2_free_replay_slots(osb);
  1183. osb->recovery_thread_task = NULL;
  1184. mb(); /* sync with ocfs2_recovery_thread_running */
  1185. wake_up(&osb->recovery_event);
  1186. mutex_unlock(&osb->recovery_lock);
  1187. if (rm_quota)
  1188. kfree(rm_quota);
  1189. mlog_exit(status);
  1190. /* no one is callint kthread_stop() for us so the kthread() api
  1191. * requires that we call do_exit(). And it isn't exported, but
  1192. * complete_and_exit() seems to be a minimal wrapper around it. */
  1193. complete_and_exit(NULL, status);
  1194. return status;
  1195. }
  1196. void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
  1197. {
  1198. mlog_entry("(node_num=%d, osb->node_num = %d)\n",
  1199. node_num, osb->node_num);
  1200. mutex_lock(&osb->recovery_lock);
  1201. if (osb->disable_recovery)
  1202. goto out;
  1203. /* People waiting on recovery will wait on
  1204. * the recovery map to empty. */
  1205. if (ocfs2_recovery_map_set(osb, node_num))
  1206. mlog(0, "node %d already in recovery map.\n", node_num);
  1207. mlog(0, "starting recovery thread...\n");
  1208. if (osb->recovery_thread_task)
  1209. goto out;
  1210. osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb,
  1211. "ocfs2rec");
  1212. if (IS_ERR(osb->recovery_thread_task)) {
  1213. mlog_errno((int)PTR_ERR(osb->recovery_thread_task));
  1214. osb->recovery_thread_task = NULL;
  1215. }
  1216. out:
  1217. mutex_unlock(&osb->recovery_lock);
  1218. wake_up(&osb->recovery_event);
  1219. mlog_exit_void();
  1220. }
  1221. static int ocfs2_read_journal_inode(struct ocfs2_super *osb,
  1222. int slot_num,
  1223. struct buffer_head **bh,
  1224. struct inode **ret_inode)
  1225. {
  1226. int status = -EACCES;
  1227. struct inode *inode = NULL;
  1228. BUG_ON(slot_num >= osb->max_slots);
  1229. inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
  1230. slot_num);
  1231. if (!inode || is_bad_inode(inode)) {
  1232. mlog_errno(status);
  1233. goto bail;
  1234. }
  1235. SET_INODE_JOURNAL(inode);
  1236. status = ocfs2_read_inode_block_full(inode, bh, OCFS2_BH_IGNORE_CACHE);
  1237. if (status < 0) {
  1238. mlog_errno(status);
  1239. goto bail;
  1240. }
  1241. status = 0;
  1242. bail:
  1243. if (inode) {
  1244. if (status || !ret_inode)
  1245. iput(inode);
  1246. else
  1247. *ret_inode = inode;
  1248. }
  1249. return status;
  1250. }
  1251. /* Does the actual journal replay and marks the journal inode as
  1252. * clean. Will only replay if the journal inode is marked dirty. */
  1253. static int ocfs2_replay_journal(struct ocfs2_super *osb,
  1254. int node_num,
  1255. int slot_num)
  1256. {
  1257. int status;
  1258. int got_lock = 0;
  1259. unsigned int flags;
  1260. struct inode *inode = NULL;
  1261. struct ocfs2_dinode *fe;
  1262. journal_t *journal = NULL;
  1263. struct buffer_head *bh = NULL;
  1264. u32 slot_reco_gen;
  1265. status = ocfs2_read_journal_inode(osb, slot_num, &bh, &inode);
  1266. if (status) {
  1267. mlog_errno(status);
  1268. goto done;
  1269. }
  1270. fe = (struct ocfs2_dinode *)bh->b_data;
  1271. slot_reco_gen = ocfs2_get_recovery_generation(fe);
  1272. brelse(bh);
  1273. bh = NULL;
  1274. /*
  1275. * As the fs recovery is asynchronous, there is a small chance that
  1276. * another node mounted (and recovered) the slot before the recovery
  1277. * thread could get the lock. To handle that, we dirty read the journal
  1278. * inode for that slot to get the recovery generation. If it is
  1279. * different than what we expected, the slot has been recovered.
  1280. * If not, it needs recovery.
  1281. */
  1282. if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
  1283. mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num,
  1284. osb->slot_recovery_generations[slot_num], slot_reco_gen);
  1285. osb->slot_recovery_generations[slot_num] = slot_reco_gen;
  1286. status = -EBUSY;
  1287. goto done;
  1288. }
  1289. /* Continue with recovery as the journal has not yet been recovered */
  1290. status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
  1291. if (status < 0) {
  1292. mlog(0, "status returned from ocfs2_inode_lock=%d\n", status);
  1293. if (status != -ERESTARTSYS)
  1294. mlog(ML_ERROR, "Could not lock journal!\n");
  1295. goto done;
  1296. }
  1297. got_lock = 1;
	fe = (struct ocfs2_dinode *) bh->b_data;

	flags = le32_to_cpu(fe->id1.journal1.ij_flags);
	slot_reco_gen = ocfs2_get_recovery_generation(fe);

	if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
		mlog(0, "No recovery required for node %d\n", node_num);
		/* Refresh recovery generation for the slot */
		osb->slot_recovery_generations[slot_num] = slot_reco_gen;
		goto done;
	}

	/* we need to run complete recovery for offline orphan slots */
	ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);

	mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n",
	     node_num, slot_num,
	     MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));

	OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);

	status = ocfs2_force_read_journal(inode);
	if (status < 0) {
		mlog_errno(status);
		goto done;
	}

	mlog(0, "calling journal_init_inode\n");
	journal = jbd2_journal_init_inode(inode);
	if (journal == NULL) {
		mlog(ML_ERROR, "Linux journal layer error\n");
		status = -EIO;
		goto done;
	}

	status = jbd2_journal_load(journal);
	if (status < 0) {
		mlog_errno(status);
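		/* jbd2_journal_destroy() drops its own reference on the
		 * journal inode (j_inode), so take an extra one here to
		 * balance it; our reference is put at done:. */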
		if (!igrab(inode))
			BUG();
		jbd2_journal_destroy(journal);
		goto done;
	}

	ocfs2_clear_journal_error(osb->sb, journal, slot_num);

	/* wipe the journal */
	mlog(0, "flushing the journal.\n");
	jbd2_journal_lock_updates(journal);
	status = jbd2_journal_flush(journal);
	jbd2_journal_unlock_updates(journal);
	if (status < 0)
		mlog_errno(status);

	/* This will mark the node clean */
	flags = le32_to_cpu(fe->id1.journal1.ij_flags);
	flags &= ~OCFS2_JOURNAL_DIRTY_FL;
	fe->id1.journal1.ij_flags = cpu_to_le32(flags);

	/* Increment recovery generation to indicate successful recovery */
	ocfs2_bump_recovery_generation(fe);
	osb->slot_recovery_generations[slot_num] =
					ocfs2_get_recovery_generation(fe);

	ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
	status = ocfs2_write_block(osb, bh, INODE_CACHE(inode));
	if (status < 0)
		mlog_errno(status);

	if (!igrab(inode))
		BUG();

	jbd2_journal_destroy(journal);

done:
	/* drop the lock on this node's journal */
	if (got_lock)
		ocfs2_inode_unlock(inode, 1);

	if (inode)
		iput(inode);

	brelse(bh);

	mlog_exit(status);
	return status;
}

/*
 * Do the most important parts of node recovery:
 *  - Replay its journal
 *  - Stamp a clean local allocator file
 *  - Stamp a clean truncate log
 *  - Mark the node clean
 *
 * If this function completes without error, a node in OCFS2 can be
 * said to have been safely recovered. As a result, failure during the
 * second part of a node's recovery process (local alloc recovery) is
 * far less concerning.
 */
static int ocfs2_recover_node(struct ocfs2_super *osb,
			      int node_num, int slot_num)
{
	int status = 0;
	struct ocfs2_dinode *la_copy = NULL;
	struct ocfs2_dinode *tl_copy = NULL;

	mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n",
		   node_num, slot_num, osb->node_num);

	/* Should not ever be called to recover ourselves -- in that
	 * case we should've called ocfs2_journal_load instead. */
	BUG_ON(osb->node_num == node_num);

	status = ocfs2_replay_journal(osb, node_num, slot_num);
	if (status < 0) {
		if (status == -EBUSY) {
			mlog(0, "Skipping recovery for slot %u (node %u) "
			     "as another node has recovered it\n", slot_num,
			     node_num);
			status = 0;
			goto done;
		}
		mlog_errno(status);
		goto done;
	}

	/* Stamp a clean local alloc file AFTER recovering the journal... */
	status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy);
	if (status < 0) {
		mlog_errno(status);
		goto done;
	}

	/* An error from begin_truncate_log_recovery is not
	 * serious enough to warrant halting the rest of
	 * recovery. */
	status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy);
	if (status < 0)
		mlog_errno(status);

	/* Likewise, this would be a strange but ultimately not so
	 * harmful place to get an error... */
	status = ocfs2_clear_slot(osb, slot_num);
	if (status < 0)
		mlog_errno(status);

	/* This will kfree the memory pointed to by la_copy and tl_copy */
	ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
					tl_copy, NULL);

	status = 0;
done:

	mlog_exit(status);
	return status;
}

/* Test node liveness by trylocking its journal. If we get the lock,
 * we drop it here. Return 0 if we got the lock, -EAGAIN if the node is
 * still alive (we couldn't get the lock) and < 0 on error. */
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
				 int slot_num)
{
	int status, flags;
	struct inode *inode = NULL;

	inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
					    slot_num);
	if (inode == NULL) {
		mlog(ML_ERROR, "access error\n");
		status = -EACCES;
		goto bail;
	}
	if (is_bad_inode(inode)) {
		mlog(ML_ERROR, "access error (bad inode)\n");
		iput(inode);
		inode = NULL;
		status = -EACCES;
		goto bail;
	}
	SET_INODE_JOURNAL(inode);

	flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE;
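	/* NOQUEUE turns this into a trylock: if the slot's journal is still
	 * held by a live node, the attempt fails with -EAGAIN instead of
	 * blocking. */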
	status = ocfs2_inode_lock_full(inode, NULL, 1, flags);
	if (status < 0) {
		if (status != -EAGAIN)
			mlog_errno(status);
		goto bail;
	}

	ocfs2_inode_unlock(inode, 1);
bail:
	if (inode)
		iput(inode);

	return status;
}

/* Call this underneath ocfs2_super_lock. It also assumes that the
 * slot info struct has been updated from disk. */
int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
{
	unsigned int node_num;
	int status, i;
	u32 gen;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *di;

	/* This is called with the super block cluster lock, so we
	 * know that the slot map can't change underneath us. */

	for (i = 0; i < osb->max_slots; i++) {
		/* Read journal inode to get the recovery generation */
		status = ocfs2_read_journal_inode(osb, i, &bh, NULL);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
		di = (struct ocfs2_dinode *)bh->b_data;
		gen = ocfs2_get_recovery_generation(di);
		brelse(bh);
		bh = NULL;
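		/* osb_lock protects the cached recovery generations as well
		 * as the slot-map and recovery-map lookups done below. */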
		spin_lock(&osb->osb_lock);
		osb->slot_recovery_generations[i] = gen;

		mlog(0, "Slot %u recovery generation is %u\n", i,
		     osb->slot_recovery_generations[i]);

		if (i == osb->slot_num) {
			spin_unlock(&osb->osb_lock);
			continue;
		}

		status = ocfs2_slot_to_node_num_locked(osb, i, &node_num);
		if (status == -ENOENT) {
			spin_unlock(&osb->osb_lock);
			continue;
		}

		if (__ocfs2_recovery_map_test(osb, node_num)) {
			spin_unlock(&osb->osb_lock);
			continue;
		}
		spin_unlock(&osb->osb_lock);

		/* Ok, we have a slot occupied by another node which
		 * is not in the recovery map. We trylock its journal
		 * file here to test if it's still alive. */
		status = ocfs2_trylock_journal(osb, i);
		if (!status) {
			/* Since we're called from mount, we know that
			 * the recovery thread can't race us on
			 * setting / checking the recovery bits. */
			ocfs2_recovery_thread(osb, node_num);
		} else if ((status < 0) && (status != -EAGAIN)) {
			mlog_errno(status);
			goto bail;
		}
	}

	status = 0;
bail:
	mlog_exit(status);
	return status;
}

/*
 * Scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some
 * randomness to the timeout to minimize multiple nodes firing the timer at
 * the same time.
 */
static inline unsigned long ocfs2_orphan_scan_timeout(void)
{
	unsigned long time;

	get_random_bytes(&time, sizeof(time));
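	/* Values are in milliseconds: the base timeout plus up to five
	 * seconds of random jitter, converted to jiffies below. */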
	time = ORPHAN_SCAN_SCHEDULE_TIMEOUT + (time % 5000);
	return msecs_to_jiffies(time);
}

/*
 * ocfs2_queue_orphan_scan calls ocfs2_queue_recovery_completion for
 * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
 * is done to catch any orphans that are left over in orphan directories.
 *
 * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
 * milliseconds. It gets an EX lock on os_lockres and checks the sequence
 * number stored in the LVB. If the sequence number has changed, it means
 * some other node has done the scan. This node skips the scan and tracks
 * the sequence number. If the sequence number didn't change, it means a
 * scan hasn't happened. The node queues a scan and increments the
 * sequence number in the LVB.
 */
void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;
	int status, i;
	u32 seqno = 0;

	os = &osb->osb_orphan_scan;

	mlog(0, "Begin orphan scan\n");

	if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
		goto out;

	status = ocfs2_orphan_scan_lock(osb, &seqno);
	if (status < 0) {
		if (status != -EAGAIN)
			mlog_errno(status);
		goto out;
	}

	/* Do not queue the tasks if the volume is being umounted */
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
		goto unlock;

	if (os->os_seqno != seqno) {
		os->os_seqno = seqno;
		goto unlock;
	}
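	/* The sequence number did not change, so no other node has scanned
	 * since our last pass; queue an orphan-dir scan for every slot. */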
	for (i = 0; i < osb->max_slots; i++)
		ocfs2_queue_recovery_completion(osb->journal, i, NULL, NULL,
						NULL);
	/*
	 * We queued a recovery on orphan slots, increment the sequence
	 * number and update the LVB so other nodes will skip the scan
	 * for a while
	 */
	seqno++;
	os->os_count++;
	os->os_scantime = CURRENT_TIME;
unlock:
	ocfs2_orphan_scan_unlock(osb, seqno);
out:
	mlog(0, "Orphan scan completed\n");
	return;
}

/* Worker task that gets fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT milliseconds */
void ocfs2_orphan_scan_work(struct work_struct *work)
{
	struct ocfs2_orphan_scan *os;
	struct ocfs2_super *osb;

	os = container_of(work, struct ocfs2_orphan_scan,
			  os_orphan_scan_work.work);
	osb = os->os_osb;
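	/* os_lock serializes the scan with ocfs2_orphan_scan_stop(), so the
	 * work item cannot re-arm itself after stop has cancelled it. */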
	mutex_lock(&os->os_lock);
	ocfs2_queue_orphan_scan(osb);
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
		queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
				   ocfs2_orphan_scan_timeout());
	mutex_unlock(&os->os_lock);
}

void ocfs2_orphan_scan_stop(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;

	os = &osb->osb_orphan_scan;
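	/* Mark the scan inactive first so a running work item will not
	 * requeue itself, then cancel the pending work under os_lock. */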
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) {
		atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
		mutex_lock(&os->os_lock);
		cancel_delayed_work(&os->os_orphan_scan_work);
		mutex_unlock(&os->os_lock);
	}
}

void ocfs2_orphan_scan_init(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;

	os = &osb->osb_orphan_scan;
	os->os_osb = osb;
	os->os_count = 0;
	os->os_seqno = 0;
	mutex_init(&os->os_lock);
	INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work);
}

void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;

	os = &osb->osb_orphan_scan;
	os->os_scantime = CURRENT_TIME;
	if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
		atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
	else {
		atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE);
		queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
				   ocfs2_orphan_scan_timeout());
	}
}

struct ocfs2_orphan_filldir_priv {
	struct inode		*head;
	struct ocfs2_super	*osb;
};

static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len,
				loff_t pos, u64 ino, unsigned type)
{
	struct ocfs2_orphan_filldir_priv *p = priv;
	struct inode *iter;

	if (name_len == 1 && !strncmp(".", name, 1))
		return 0;
	if (name_len == 2 && !strncmp("..", name, 2))
		return 0;

	/* Skip bad inodes so that recovery can continue */
	iter = ocfs2_iget(p->osb, ino,
			  OCFS2_FI_FLAG_ORPHAN_RECOVERY, 0);
	if (IS_ERR(iter))
		return 0;

	mlog(0, "queue orphan %llu\n",
	     (unsigned long long)OCFS2_I(iter)->ip_blkno);
	/* No locking is required for the next_orphan queue as there
	 * is only ever a single process doing orphan recovery. */
	OCFS2_I(iter)->ip_next_orphan = p->head;
	p->head = iter;

	return 0;
}

static int ocfs2_queue_orphans(struct ocfs2_super *osb,
			       int slot,
			       struct inode **head)
{
	int status;
	struct inode *orphan_dir_inode = NULL;
	struct ocfs2_orphan_filldir_priv priv;
	loff_t pos = 0;

	priv.osb = osb;
	priv.head = *head;

	orphan_dir_inode = ocfs2_get_system_file_inode(osb,
						       ORPHAN_DIR_SYSTEM_INODE,
						       slot);
	if (!orphan_dir_inode) {
		status = -ENOENT;
		mlog_errno(status);
		return status;
	}
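	/* Hold the orphan dir's i_mutex and a shared cluster lock while
	 * walking it, so directory entries cannot change underneath us. */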
	mutex_lock(&orphan_dir_inode->i_mutex);
	status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv,
				   ocfs2_orphan_filldir);
	if (status) {
		mlog_errno(status);
		goto out_cluster;
	}

	*head = priv.head;

out_cluster:
	ocfs2_inode_unlock(orphan_dir_inode, 0);
out:
	mutex_unlock(&orphan_dir_inode->i_mutex);
	iput(orphan_dir_inode);
	return status;
}

static int ocfs2_orphan_recovery_can_continue(struct ocfs2_super *osb,
					      int slot)
{
	int ret;

	spin_lock(&osb->osb_lock);
	ret = !osb->osb_orphan_wipes[slot];
	spin_unlock(&osb->osb_lock);
	return ret;
}

static void ocfs2_mark_recovering_orphan_dir(struct ocfs2_super *osb,
					     int slot)
{
	spin_lock(&osb->osb_lock);
	/* Mark ourselves such that new processes in delete_inode()
	 * know to quit early. */
	ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
	while (osb->osb_orphan_wipes[slot]) {
		/* If any processes are already in the middle of an
		 * orphan wipe on this dir, then we need to wait for
		 * them. */
		spin_unlock(&osb->osb_lock);
		wait_event_interruptible(osb->osb_wipe_event,
			 ocfs2_orphan_recovery_can_continue(osb, slot));
		spin_lock(&osb->osb_lock);
	}
	spin_unlock(&osb->osb_lock);
}

static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb,
					      int slot)
{
	ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
}

/*
 * Orphan recovery. Each mounted node has its own orphan dir which we
 * must run during recovery. Our strategy here is to build a list of
 * the inodes in the orphan dir and iget/iput them. The VFS does
 * (most) of the rest of the work.
 *
 * Orphan recovery can happen at any time, not just mount, so we have a
 * couple of extra considerations.
 *
 * - We grab as many inodes as we can under the orphan dir lock -
 *   doing iget() outside the orphan dir risks getting a reference on
 *   an invalid inode.
 * - We must be sure not to deadlock with other processes on the
 *   system wanting to run delete_inode(). This can happen when they go
 *   to lock the orphan dir and the orphan recovery process attempts to
 *   iget() inside the orphan dir lock. This can be avoided by
 *   advertising our state to ocfs2_delete_inode().
 */
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
				 int slot)
{
	int ret = 0;
	struct inode *inode = NULL;
	struct inode *iter;
	struct ocfs2_inode_info *oi;

	mlog(0, "Recover inodes from orphan dir in slot %d\n", slot);

	ocfs2_mark_recovering_orphan_dir(osb, slot);
	ret = ocfs2_queue_orphans(osb, slot, &inode);
	ocfs2_clear_recovering_orphan_dir(osb, slot);

	/* Error here should be noted, but we want to continue with as
	 * many queued inodes as we've got. */
	if (ret)
		mlog_errno(ret);

	while (inode) {
		oi = OCFS2_I(inode);
		mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno);

		iter = oi->ip_next_orphan;

		spin_lock(&oi->ip_lock);
		/* The remote delete code may have set these on the
		 * assumption that the other node would wipe them
		 * successfully. If they are still in the node's
		 * orphan dir, we need to reset that state. */
		oi->ip_flags &= ~(OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE);

		/* Set the proper information to get us going into
		 * ocfs2_delete_inode. */
		oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
		spin_unlock(&oi->ip_lock);
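		/* This iput() drops the reference taken in
		 * ocfs2_orphan_filldir(); for a genuinely orphaned inode it
		 * sends us through ocfs2_delete_inode(), which decides
		 * whether the inode really needs to be wiped. */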
		iput(inode);

		inode = iter;
	}

	return ret;
}

static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota)
{
	/* This check is good because ocfs2 will wait on our recovery
	 * thread before changing it to something other than MOUNTED
	 * or DISABLED. */
	wait_event(osb->osb_mount_event,
		   (!quota && atomic_read(&osb->vol_state) == VOLUME_MOUNTED) ||
		   atomic_read(&osb->vol_state) == VOLUME_MOUNTED_QUOTAS ||
		   atomic_read(&osb->vol_state) == VOLUME_DISABLED);

	/* If there's an error on mount, then we may never get to the
	 * MOUNTED flag, but this is set right before
	 * dismount_volume() so we can trust it. */
	if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
		mlog(0, "mount error, exiting!\n");
		return -EBUSY;
	}

	return 0;
}

static int ocfs2_commit_thread(void *arg)
{
	int status;
	struct ocfs2_super *osb = arg;
	struct ocfs2_journal *journal = osb->journal;

	/* we can trust j_num_trans here because _should_stop() is only set in
	 * shutdown and nobody other than ourselves should be able to start
	 * transactions. committing on shutdown might take a few iterations
	 * as final transactions put deleted inodes on the list */
	while (!(kthread_should_stop() &&
		 atomic_read(&journal->j_num_trans) == 0)) {

		wait_event_interruptible(osb->checkpoint_event,
					 atomic_read(&journal->j_num_trans)
					 || kthread_should_stop());

		status = ocfs2_commit_cache(osb);
		if (status < 0)
			mlog_errno(status);

		if (kthread_should_stop() && atomic_read(&journal->j_num_trans)) {
			mlog(ML_KTHREAD,
			     "commit_thread: %u transactions pending on "
			     "shutdown\n",
			     atomic_read(&journal->j_num_trans));
		}
	}

	return 0;
}

/* Reads all the journal inodes without taking any cluster locks. Used
 * for hard readonly access to determine whether any journal requires
 * recovery. Also used to refresh the recovery generation numbers after
 * a journal has been recovered by another node.
 */
int ocfs2_check_journals_nolocks(struct ocfs2_super *osb)
{
	int ret = 0;
	unsigned int slot;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	int journal_dirty = 0;
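	/* Walk every slot's journal inode with an uncached, lockless read.
	 * Any journal still marked dirty means the volume needs recovery,
	 * so report -EROFS to the caller. */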
	for (slot = 0; slot < osb->max_slots; slot++) {
		ret = ocfs2_read_journal_inode(osb, slot, &di_bh, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		di = (struct ocfs2_dinode *) di_bh->b_data;

		osb->slot_recovery_generations[slot] =
			ocfs2_get_recovery_generation(di);

		if (le32_to_cpu(di->id1.journal1.ij_flags) &
		    OCFS2_JOURNAL_DIRTY_FL)
			journal_dirty = 1;

		brelse(di_bh);
		di_bh = NULL;
	}

out:
	if (journal_dirty)
		ret = -EROFS;
	return ret;
}