journal.c
  1. /* -*- mode: c; c-basic-offset: 8; -*-
  2. * vim: noexpandtab sw=8 ts=8 sts=0:
  3. *
  4. * journal.c
  5. *
  6. * Defines functions of the journalling API
  7. *
  8. * Copyright (C) 2003, 2004 Oracle. All rights reserved.
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2 of the License, or (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public
  21. * License along with this program; if not, write to the
  22. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  23. * Boston, MA 02111-1307, USA.
  24. */
  25. #include <linux/fs.h>
  26. #include <linux/types.h>
  27. #include <linux/slab.h>
  28. #include <linux/highmem.h>
  29. #include <linux/kthread.h>
  30. #include <linux/time.h>
  31. #include <linux/random.h>
  32. #define MLOG_MASK_PREFIX ML_JOURNAL
  33. #include <cluster/masklog.h>
  34. #include "ocfs2.h"
  35. #include "alloc.h"
  36. #include "blockcheck.h"
  37. #include "dir.h"
  38. #include "dlmglue.h"
  39. #include "extent_map.h"
  40. #include "heartbeat.h"
  41. #include "inode.h"
  42. #include "journal.h"
  43. #include "localalloc.h"
  44. #include "slot_map.h"
  45. #include "super.h"
  46. #include "sysfile.h"
  47. #include "uptodate.h"
  48. #include "quota.h"
  49. #include "buffer_head_io.h"
  50. DEFINE_SPINLOCK(trans_inc_lock);
  51. #define ORPHAN_SCAN_SCHEDULE_TIMEOUT 300000
  52. static int ocfs2_force_read_journal(struct inode *inode);
  53. static int ocfs2_recover_node(struct ocfs2_super *osb,
  54. int node_num, int slot_num);
  55. static int __ocfs2_recovery_thread(void *arg);
  56. static int ocfs2_commit_cache(struct ocfs2_super *osb);
  57. static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota);
  58. static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
  59. int dirty, int replayed);
  60. static int ocfs2_trylock_journal(struct ocfs2_super *osb,
  61. int slot_num);
  62. static int ocfs2_recover_orphans(struct ocfs2_super *osb,
  63. int slot);
  64. static int ocfs2_commit_thread(void *arg);
  65. static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
  66. int slot_num,
  67. struct ocfs2_dinode *la_dinode,
  68. struct ocfs2_dinode *tl_dinode,
  69. struct ocfs2_quota_recovery *qrec);
  70. static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb)
  71. {
  72. return __ocfs2_wait_on_mount(osb, 0);
  73. }
  74. static inline int ocfs2_wait_on_quotas(struct ocfs2_super *osb)
  75. {
  76. return __ocfs2_wait_on_mount(osb, 1);
  77. }
  78. /*
  79. * This replay_map tracks online/offline slots, so that we can recover
  80. * offline slots during recovery and mount
  81. */
  82. enum ocfs2_replay_state {
  83. REPLAY_UNNEEDED = 0, /* Replay is not needed, so ignore this map */
  84. REPLAY_NEEDED, /* Replay slots marked in rm_replay_slots */
  85. REPLAY_DONE /* Replay was already queued */
  86. };
  87. struct ocfs2_replay_map {
  88. unsigned int rm_slots;
  89. enum ocfs2_replay_state rm_state;
  90. unsigned char rm_replay_slots[0];
  91. };
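/*
 * A minimal sketch of the replay_map lifecycle, assuming the recovery and
 * mount paths defined later in this file (see __ocfs2_recovery_thread()
 * and ocfs2_complete_mount_recovery()):
 *
 *	ocfs2_compute_replay_slots(osb);                  mark offline slots
 *	ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);   a dirty journal was found
 *	ocfs2_queue_replay_slots(osb);                    queue their cleanup
 *	ocfs2_free_replay_slots(osb);                     drop the map when done
 */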
  92. void ocfs2_replay_map_set_state(struct ocfs2_super *osb, int state)
  93. {
  94. if (!osb->replay_map)
  95. return;
  96. /* If we've already queued the replay, we don't have any more to do */
  97. if (osb->replay_map->rm_state == REPLAY_DONE)
  98. return;
  99. osb->replay_map->rm_state = state;
  100. }
  101. int ocfs2_compute_replay_slots(struct ocfs2_super *osb)
  102. {
  103. struct ocfs2_replay_map *replay_map;
  104. int i, node_num;
  105. /* If replay map is already set, we don't do it again */
  106. if (osb->replay_map)
  107. return 0;
  108. replay_map = kzalloc(sizeof(struct ocfs2_replay_map) +
  109. (osb->max_slots * sizeof(char)), GFP_KERNEL);
  110. if (!replay_map) {
  111. mlog_errno(-ENOMEM);
  112. return -ENOMEM;
  113. }
  114. spin_lock(&osb->osb_lock);
  115. replay_map->rm_slots = osb->max_slots;
  116. replay_map->rm_state = REPLAY_UNNEEDED;
  117. /* set rm_replay_slots for offline slot(s) */
  118. for (i = 0; i < replay_map->rm_slots; i++) {
  119. if (ocfs2_slot_to_node_num_locked(osb, i, &node_num) == -ENOENT)
  120. replay_map->rm_replay_slots[i] = 1;
  121. }
  122. osb->replay_map = replay_map;
  123. spin_unlock(&osb->osb_lock);
  124. return 0;
  125. }
  126. void ocfs2_queue_replay_slots(struct ocfs2_super *osb)
  127. {
  128. struct ocfs2_replay_map *replay_map = osb->replay_map;
  129. int i;
  130. if (!replay_map)
  131. return;
  132. if (replay_map->rm_state != REPLAY_NEEDED)
  133. return;
  134. for (i = 0; i < replay_map->rm_slots; i++)
  135. if (replay_map->rm_replay_slots[i])
  136. ocfs2_queue_recovery_completion(osb->journal, i, NULL,
  137. NULL, NULL);
  138. replay_map->rm_state = REPLAY_DONE;
  139. }
  140. void ocfs2_free_replay_slots(struct ocfs2_super *osb)
  141. {
  142. struct ocfs2_replay_map *replay_map = osb->replay_map;
  143. if (!osb->replay_map)
  144. return;
  145. kfree(replay_map);
  146. osb->replay_map = NULL;
  147. }
  148. int ocfs2_recovery_init(struct ocfs2_super *osb)
  149. {
  150. struct ocfs2_recovery_map *rm;
  151. mutex_init(&osb->recovery_lock);
  152. osb->disable_recovery = 0;
  153. osb->recovery_thread_task = NULL;
  154. init_waitqueue_head(&osb->recovery_event);
  155. rm = kzalloc(sizeof(struct ocfs2_recovery_map) +
  156. osb->max_slots * sizeof(unsigned int),
  157. GFP_KERNEL);
  158. if (!rm) {
  159. mlog_errno(-ENOMEM);
  160. return -ENOMEM;
  161. }
  162. rm->rm_entries = (unsigned int *)((char *)rm +
  163. sizeof(struct ocfs2_recovery_map));
  164. osb->recovery_map = rm;
  165. return 0;
  166. }
  167. /* we can't grab the goofy sem lock from inside wait_event, so we use
  168. * memory barriers to make sure that we'll see the null task before
  169. * being woken up */
  170. static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
  171. {
  172. mb();
  173. return osb->recovery_thread_task != NULL;
  174. }
  175. void ocfs2_recovery_exit(struct ocfs2_super *osb)
  176. {
  177. struct ocfs2_recovery_map *rm;
  178. /* disable any new recovery threads and wait for any currently
  179. * running ones to exit. Do this before setting the vol_state. */
  180. mutex_lock(&osb->recovery_lock);
  181. osb->disable_recovery = 1;
  182. mutex_unlock(&osb->recovery_lock);
  183. wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));
  184. /* At this point, we know that no more recovery threads can be
  185. * launched, so wait for any recovery completion work to
  186. * complete. */
  187. flush_workqueue(ocfs2_wq);
  188. /*
  189. * Now that recovery is shut down, and the osb is about to be
  190. * freed, the osb_lock is not taken here.
  191. */
  192. rm = osb->recovery_map;
  193. /* XXX: Should we bug if there are dirty entries? */
  194. kfree(rm);
  195. }
  196. static int __ocfs2_recovery_map_test(struct ocfs2_super *osb,
  197. unsigned int node_num)
  198. {
  199. int i;
  200. struct ocfs2_recovery_map *rm = osb->recovery_map;
  201. assert_spin_locked(&osb->osb_lock);
  202. for (i = 0; i < rm->rm_used; i++) {
  203. if (rm->rm_entries[i] == node_num)
  204. return 1;
  205. }
  206. return 0;
  207. }
  208. /* Behaves like test-and-set. Returns the previous value */
  209. static int ocfs2_recovery_map_set(struct ocfs2_super *osb,
  210. unsigned int node_num)
  211. {
  212. struct ocfs2_recovery_map *rm = osb->recovery_map;
  213. spin_lock(&osb->osb_lock);
  214. if (__ocfs2_recovery_map_test(osb, node_num)) {
  215. spin_unlock(&osb->osb_lock);
  216. return 1;
  217. }
  218. /* XXX: Can this be exploited? Not from o2dlm... */
  219. BUG_ON(rm->rm_used >= osb->max_slots);
  220. rm->rm_entries[rm->rm_used] = node_num;
  221. rm->rm_used++;
  222. spin_unlock(&osb->osb_lock);
  223. return 0;
  224. }
  225. static void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
  226. unsigned int node_num)
  227. {
  228. int i;
  229. struct ocfs2_recovery_map *rm = osb->recovery_map;
  230. spin_lock(&osb->osb_lock);
  231. for (i = 0; i < rm->rm_used; i++) {
  232. if (rm->rm_entries[i] == node_num)
  233. break;
  234. }
  235. if (i < rm->rm_used) {
  236. /* XXX: be careful with the pointer math */
  237. memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]),
  238. (rm->rm_used - i - 1) * sizeof(unsigned int));
  239. rm->rm_used--;
  240. }
  241. spin_unlock(&osb->osb_lock);
  242. }
  243. static int ocfs2_commit_cache(struct ocfs2_super *osb)
  244. {
  245. int status = 0;
  246. unsigned int flushed;
  247. unsigned long old_id;
  248. struct ocfs2_journal *journal = NULL;
  249. mlog_entry_void();
  250. journal = osb->journal;
  251. /* Flush all pending commits and checkpoint the journal. */
  252. down_write(&journal->j_trans_barrier);
  253. if (atomic_read(&journal->j_num_trans) == 0) {
  254. up_write(&journal->j_trans_barrier);
  255. mlog(0, "No transactions for me to flush!\n");
  256. goto finally;
  257. }
  258. jbd2_journal_lock_updates(journal->j_journal);
  259. status = jbd2_journal_flush(journal->j_journal);
  260. jbd2_journal_unlock_updates(journal->j_journal);
  261. if (status < 0) {
  262. up_write(&journal->j_trans_barrier);
  263. mlog_errno(status);
  264. goto finally;
  265. }
  266. old_id = ocfs2_inc_trans_id(journal);
  267. flushed = atomic_read(&journal->j_num_trans);
  268. atomic_set(&journal->j_num_trans, 0);
  269. up_write(&journal->j_trans_barrier);
  270. mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n",
  271. journal->j_trans_id, flushed);
  272. ocfs2_wake_downconvert_thread(osb);
  273. wake_up(&journal->j_checkpointed);
  274. finally:
  275. mlog_exit(status);
  276. return status;
  277. }
  278. /* Returns a new handle, or an ERR_PTR() on failure. If a transaction
  279. * is already running on this task, the existing handle is returned
  280. * with its reference count bumped (the nested case below). */
  281. handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
  282. {
  283. journal_t *journal = osb->journal->j_journal;
  284. handle_t *handle;
  285. BUG_ON(!osb || !osb->journal->j_journal);
  286. if (ocfs2_is_hard_readonly(osb))
  287. return ERR_PTR(-EROFS);
  288. BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE);
  289. BUG_ON(max_buffs <= 0);
  290. /* Nested transaction? Just return the handle... */
  291. if (journal_current_handle())
  292. return jbd2_journal_start(journal, max_buffs);
  293. down_read(&osb->journal->j_trans_barrier);
  294. handle = jbd2_journal_start(journal, max_buffs);
  295. if (IS_ERR(handle)) {
  296. up_read(&osb->journal->j_trans_barrier);
  297. mlog_errno(PTR_ERR(handle));
  298. if (is_journal_aborted(journal)) {
  299. ocfs2_abort(osb->sb, "Detected aborted journal");
  300. handle = ERR_PTR(-EROFS);
  301. }
  302. } else {
  303. if (!ocfs2_mount_local(osb))
  304. atomic_inc(&(osb->journal->j_num_trans));
  305. }
  306. return handle;
  307. }
  308. int ocfs2_commit_trans(struct ocfs2_super *osb,
  309. handle_t *handle)
  310. {
  311. int ret, nested;
  312. struct ocfs2_journal *journal = osb->journal;
  313. BUG_ON(!handle);
  314. nested = handle->h_ref > 1;
  315. ret = jbd2_journal_stop(handle);
  316. if (ret < 0)
  317. mlog_errno(ret);
  318. if (!nested)
  319. up_read(&journal->j_trans_barrier);
  320. return ret;
  321. }
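/*
 * A minimal sketch of the start/access/dirty/commit pattern these helpers
 * support. 'inode', 'di_bh' and OCFS2_INODE_UPDATE_CREDITS are illustrative
 * assumptions, not taken from this file:
 *
 *	handle_t *handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
 *					 OCFS2_JOURNAL_ACCESS_WRITE);
 *	if (!status) {
 *		... modify di_bh->b_data ...
 *		ocfs2_journal_dirty(handle, di_bh);
 *	}
 *	ocfs2_commit_trans(osb, handle);
 */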
  322. /*
  323. * 'nblocks' is what you want to add to the current
  324. * transaction. extend_trans will either extend the current handle by
  325. * nblocks, or commit it and start a new one with nblocks credits.
  326. *
  327. * This might call jbd2_journal_restart() which will commit dirty buffers
  328. * and then restart the transaction. Before calling
  329. * ocfs2_extend_trans(), any changed blocks should have been
  330. * dirtied. After calling it, all blocks which need to be changed must
  331. * go through another set of journal_access/journal_dirty calls.
  332. *
  333. * WARNING: This will not release any semaphores or disk locks taken
  334. * during the transaction, so make sure they were taken *before*
  335. * start_trans or we'll have ordering deadlocks.
  336. *
  337. * WARNING2: Note that we do *not* drop j_trans_barrier here. This is
  338. * good because transaction ids haven't yet been recorded on the
  339. * cluster locks associated with this handle.
  340. */
  341. int ocfs2_extend_trans(handle_t *handle, int nblocks)
  342. {
  343. int status;
  344. BUG_ON(!handle);
  345. BUG_ON(!nblocks);
  346. mlog_entry_void();
  347. mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);
  348. #ifdef CONFIG_OCFS2_DEBUG_FS
  349. status = 1;
  350. #else
  351. status = jbd2_journal_extend(handle, nblocks);
  352. if (status < 0) {
  353. mlog_errno(status);
  354. goto bail;
  355. }
  356. #endif
  357. if (status > 0) {
  358. mlog(0,
  359. "jbd2_journal_extend failed, trying "
  360. "jbd2_journal_restart\n");
  361. status = jbd2_journal_restart(handle, nblocks);
  362. if (status < 0) {
  363. mlog_errno(status);
  364. goto bail;
  365. }
  366. }
  367. status = 0;
  368. bail:
  369. mlog_exit(status);
  370. return status;
  371. }
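/*
 * A minimal usage sketch for ocfs2_extend_trans(), assuming a caller that
 * discovers it needs more credits mid-transaction ('extra_credits', 'inode'
 * and 'bh' are illustrative). Because the handle may have been restarted,
 * every buffer still being modified must go through journal_access again:
 *
 *	status = ocfs2_extend_trans(handle, extra_credits);
 *	if (status < 0)
 *		goto out;
 *	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
 *					 OCFS2_JOURNAL_ACCESS_WRITE);
 */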
  372. struct ocfs2_triggers {
  373. struct jbd2_buffer_trigger_type ot_triggers;
  374. int ot_offset;
  375. };
  376. static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers)
  377. {
  378. return container_of(triggers, struct ocfs2_triggers, ot_triggers);
  379. }
  380. static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
  381. struct buffer_head *bh,
  382. void *data, size_t size)
  383. {
  384. struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);
  385. /*
  386. * We aren't guaranteed to have the superblock here, so we
  387. * must unconditionally compute the ecc data.
  388. * __ocfs2_journal_access() will only set the triggers if
  389. * metaecc is enabled.
  390. */
  391. ocfs2_block_check_compute(data, size, data + ot->ot_offset);
  392. }
  393. /*
  394. * Quota blocks have their own trigger because the struct ocfs2_block_check
  395. * offset depends on the blocksize.
  396. */
  397. static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
  398. struct buffer_head *bh,
  399. void *data, size_t size)
  400. {
  401. struct ocfs2_disk_dqtrailer *dqt =
  402. ocfs2_block_dqtrailer(size, data);
  403. /*
  404. * We aren't guaranteed to have the superblock here, so we
  405. * must unconditionally compute the ecc data.
  406. * __ocfs2_journal_access() will only set the triggers if
  407. * metaecc is enabled.
  408. */
  409. ocfs2_block_check_compute(data, size, &dqt->dq_check);
  410. }
  411. /*
  412. * Directory blocks also have their own trigger because the
  413. * struct ocfs2_block_check offset depends on the blocksize.
  414. */
  415. static void ocfs2_db_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
  416. struct buffer_head *bh,
  417. void *data, size_t size)
  418. {
  419. struct ocfs2_dir_block_trailer *trailer =
  420. ocfs2_dir_trailer_from_size(size, data);
  421. /*
  422. * We aren't guaranteed to have the superblock here, so we
  423. * must unconditionally compute the ecc data.
  424. * __ocfs2_journal_access() will only set the triggers if
  425. * metaecc is enabled.
  426. */
  427. ocfs2_block_check_compute(data, size, &trailer->db_check);
  428. }
  429. static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
  430. struct buffer_head *bh)
  431. {
  432. mlog(ML_ERROR,
  433. "ocfs2_abort_trigger called by JBD2. bh = 0x%lx, "
  434. "bh->b_blocknr = %llu\n",
  435. (unsigned long)bh,
  436. (unsigned long long)bh->b_blocknr);
  437. /* We aren't guaranteed to have the superblock here - but if we
  438. * don't, it'll just crash. */
  439. ocfs2_error(bh->b_assoc_map->host->i_sb,
  440. "JBD2 has aborted our journal, ocfs2 cannot continue\n");
  441. }
  442. static struct ocfs2_triggers di_triggers = {
  443. .ot_triggers = {
  444. .t_commit = ocfs2_commit_trigger,
  445. .t_abort = ocfs2_abort_trigger,
  446. },
  447. .ot_offset = offsetof(struct ocfs2_dinode, i_check),
  448. };
  449. static struct ocfs2_triggers eb_triggers = {
  450. .ot_triggers = {
  451. .t_commit = ocfs2_commit_trigger,
  452. .t_abort = ocfs2_abort_trigger,
  453. },
  454. .ot_offset = offsetof(struct ocfs2_extent_block, h_check),
  455. };
  456. static struct ocfs2_triggers rb_triggers = {
  457. .ot_triggers = {
  458. .t_commit = ocfs2_commit_trigger,
  459. .t_abort = ocfs2_abort_trigger,
  460. },
  461. .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check),
  462. };
  463. static struct ocfs2_triggers gd_triggers = {
  464. .ot_triggers = {
  465. .t_commit = ocfs2_commit_trigger,
  466. .t_abort = ocfs2_abort_trigger,
  467. },
  468. .ot_offset = offsetof(struct ocfs2_group_desc, bg_check),
  469. };
  470. static struct ocfs2_triggers db_triggers = {
  471. .ot_triggers = {
  472. .t_commit = ocfs2_db_commit_trigger,
  473. .t_abort = ocfs2_abort_trigger,
  474. },
  475. };
  476. static struct ocfs2_triggers xb_triggers = {
  477. .ot_triggers = {
  478. .t_commit = ocfs2_commit_trigger,
  479. .t_abort = ocfs2_abort_trigger,
  480. },
  481. .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check),
  482. };
  483. static struct ocfs2_triggers dq_triggers = {
  484. .ot_triggers = {
  485. .t_commit = ocfs2_dq_commit_trigger,
  486. .t_abort = ocfs2_abort_trigger,
  487. },
  488. };
  489. static struct ocfs2_triggers dr_triggers = {
  490. .ot_triggers = {
  491. .t_commit = ocfs2_commit_trigger,
  492. .t_abort = ocfs2_abort_trigger,
  493. },
  494. .ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check),
  495. };
  496. static struct ocfs2_triggers dl_triggers = {
  497. .ot_triggers = {
  498. .t_commit = ocfs2_commit_trigger,
  499. .t_abort = ocfs2_abort_trigger,
  500. },
  501. .ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check),
  502. };
  503. static int __ocfs2_journal_access(handle_t *handle,
  504. struct ocfs2_caching_info *ci,
  505. struct buffer_head *bh,
  506. struct ocfs2_triggers *triggers,
  507. int type)
  508. {
  509. int status;
  510. struct ocfs2_super *osb =
  511. OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
  512. BUG_ON(!ci || !ci->ci_ops);
  513. BUG_ON(!handle);
  514. BUG_ON(!bh);
  515. mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
  516. (unsigned long long)bh->b_blocknr, type,
  517. (type == OCFS2_JOURNAL_ACCESS_CREATE) ?
  518. "OCFS2_JOURNAL_ACCESS_CREATE" :
  519. "OCFS2_JOURNAL_ACCESS_WRITE",
  520. bh->b_size);
  521. /* we can safely remove this assertion after testing. */
  522. if (!buffer_uptodate(bh)) {
  523. mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
  524. mlog(ML_ERROR, "b_blocknr=%llu\n",
  525. (unsigned long long)bh->b_blocknr);
  526. BUG();
  527. }
  528. /* Set the current transaction information on the ci so
  529. * that the locking code knows whether it can drop its locks
  530. * on this ci or not. We're protected from the commit
  531. * thread updating the current transaction id until
  532. * ocfs2_commit_trans() because ocfs2_start_trans() took
  533. * j_trans_barrier for us. */
  534. ocfs2_set_ci_lock_trans(osb->journal, ci);
  535. ocfs2_metadata_cache_io_lock(ci);
  536. switch (type) {
  537. case OCFS2_JOURNAL_ACCESS_CREATE:
  538. case OCFS2_JOURNAL_ACCESS_WRITE:
  539. status = jbd2_journal_get_write_access(handle, bh);
  540. break;
  541. case OCFS2_JOURNAL_ACCESS_UNDO:
  542. status = jbd2_journal_get_undo_access(handle, bh);
  543. break;
  544. default:
  545. status = -EINVAL;
  546. mlog(ML_ERROR, "Unknown access type!\n");
  547. }
  548. if (!status && ocfs2_meta_ecc(osb) && triggers)
  549. jbd2_journal_set_triggers(bh, &triggers->ot_triggers);
  550. ocfs2_metadata_cache_io_unlock(ci);
  551. if (status < 0)
  552. mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
  553. status, type);
  554. mlog_exit(status);
  555. return status;
  556. }
  557. int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
  558. struct buffer_head *bh, int type)
  559. {
  560. return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type);
  561. }
  562. int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
  563. struct buffer_head *bh, int type)
  564. {
  565. return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type);
  566. }
  567. int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
  568. struct buffer_head *bh, int type)
  569. {
  570. return __ocfs2_journal_access(handle, ci, bh, &rb_triggers,
  571. type);
  572. }
  573. int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
  574. struct buffer_head *bh, int type)
  575. {
  576. return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type);
  577. }
  578. int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
  579. struct buffer_head *bh, int type)
  580. {
  581. return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type);
  582. }
  583. int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
  584. struct buffer_head *bh, int type)
  585. {
  586. return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type);
  587. }
  588. int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
  589. struct buffer_head *bh, int type)
  590. {
  591. return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type);
  592. }
  593. int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
  594. struct buffer_head *bh, int type)
  595. {
  596. return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type);
  597. }
  598. int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
  599. struct buffer_head *bh, int type)
  600. {
  601. return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type);
  602. }
  603. int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
  604. struct buffer_head *bh, int type)
  605. {
  606. return __ocfs2_journal_access(handle, ci, bh, NULL, type);
  607. }
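/*
 * Each typed wrapper above attaches the ecc trigger that knows where the
 * struct ocfs2_block_check lives for that on-disk structure; the plain
 * ocfs2_journal_access() passes no trigger, so no ecc is recomputed at
 * commit time for such buffers.
 */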
  608. int ocfs2_journal_dirty(handle_t *handle,
  609. struct buffer_head *bh)
  610. {
  611. int status;
  612. mlog_entry("(bh->b_blocknr=%llu)\n",
  613. (unsigned long long)bh->b_blocknr);
  614. status = jbd2_journal_dirty_metadata(handle, bh);
  615. if (status < 0)
  616. mlog(ML_ERROR, "Could not dirty metadata buffer. "
  617. "(bh->b_blocknr=%llu)\n",
  618. (unsigned long long)bh->b_blocknr);
  619. mlog_exit(status);
  620. return status;
  621. }
  622. #define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE)
  623. void ocfs2_set_journal_params(struct ocfs2_super *osb)
  624. {
  625. journal_t *journal = osb->journal->j_journal;
  626. unsigned long commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL;
  627. if (osb->osb_commit_interval)
  628. commit_interval = osb->osb_commit_interval;
  629. spin_lock(&journal->j_state_lock);
  630. journal->j_commit_interval = commit_interval;
  631. if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
  632. journal->j_flags |= JBD2_BARRIER;
  633. else
  634. journal->j_flags &= ~JBD2_BARRIER;
  635. spin_unlock(&journal->j_state_lock);
  636. }
  637. int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
  638. {
  639. int status = -1;
  640. struct inode *inode = NULL; /* the journal inode */
  641. journal_t *j_journal = NULL;
  642. struct ocfs2_dinode *di = NULL;
  643. struct buffer_head *bh = NULL;
  644. struct ocfs2_super *osb;
  645. int inode_lock = 0;
  646. mlog_entry_void();
  647. BUG_ON(!journal);
  648. osb = journal->j_osb;
  649. /* already have the inode for our journal */
  650. inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
  651. osb->slot_num);
  652. if (inode == NULL) {
  653. status = -EACCES;
  654. mlog_errno(status);
  655. goto done;
  656. }
  657. if (is_bad_inode(inode)) {
  658. mlog(ML_ERROR, "access error (bad inode)\n");
  659. iput(inode);
  660. inode = NULL;
  661. status = -EACCES;
  662. goto done;
  663. }
  664. SET_INODE_JOURNAL(inode);
  665. OCFS2_I(inode)->ip_open_count++;
  666. /* Skip recovery waits here - journal inode metadata never
  667. * changes in a live cluster so it can be considered an
  668. * exception to the rule. */
  669. status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
  670. if (status < 0) {
  671. if (status != -ERESTARTSYS)
  672. mlog(ML_ERROR, "Could not get lock on journal!\n");
  673. goto done;
  674. }
  675. inode_lock = 1;
  676. di = (struct ocfs2_dinode *)bh->b_data;
  677. if (inode->i_size < OCFS2_MIN_JOURNAL_SIZE) {
  678. mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
  679. inode->i_size);
  680. status = -EINVAL;
  681. goto done;
  682. }
  683. mlog(0, "inode->i_size = %lld\n", inode->i_size);
  684. mlog(0, "inode->i_blocks = %llu\n",
  685. (unsigned long long)inode->i_blocks);
  686. mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);
  687. /* call the kernel's journal init function now */
  688. j_journal = jbd2_journal_init_inode(inode);
  689. if (j_journal == NULL) {
  690. mlog(ML_ERROR, "Linux journal layer error\n");
  691. status = -EINVAL;
  692. goto done;
  693. }
  694. mlog(0, "Returned from jbd2_journal_init_inode\n");
  695. mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen);
  696. *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
  697. OCFS2_JOURNAL_DIRTY_FL);
  698. journal->j_journal = j_journal;
  699. journal->j_inode = inode;
  700. journal->j_bh = bh;
  701. ocfs2_set_journal_params(osb);
  702. journal->j_state = OCFS2_JOURNAL_LOADED;
  703. status = 0;
  704. done:
  705. if (status < 0) {
  706. if (inode_lock)
  707. ocfs2_inode_unlock(inode, 1);
  708. brelse(bh);
  709. if (inode) {
  710. OCFS2_I(inode)->ip_open_count--;
  711. iput(inode);
  712. }
  713. }
  714. mlog_exit(status);
  715. return status;
  716. }
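/*
 * A minimal sketch of the mount-time ordering these helpers assume
 * ('local', 'replayed' and the error handling belong to the caller, which
 * lives outside this file):
 *
 *	status = ocfs2_journal_init(osb->journal, &dirty);
 *	...
 *	status = ocfs2_journal_load(osb->journal, local, replayed);
 *	...
 *	ocfs2_journal_shutdown(osb);    at unmount
 */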
  717. static void ocfs2_bump_recovery_generation(struct ocfs2_dinode *di)
  718. {
  719. le32_add_cpu(&(di->id1.journal1.ij_recovery_generation), 1);
  720. }
  721. static u32 ocfs2_get_recovery_generation(struct ocfs2_dinode *di)
  722. {
  723. return le32_to_cpu(di->id1.journal1.ij_recovery_generation);
  724. }
  725. static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
  726. int dirty, int replayed)
  727. {
  728. int status;
  729. unsigned int flags;
  730. struct ocfs2_journal *journal = osb->journal;
  731. struct buffer_head *bh = journal->j_bh;
  732. struct ocfs2_dinode *fe;
  733. mlog_entry_void();
  734. fe = (struct ocfs2_dinode *)bh->b_data;
  735. /* The journal bh on the osb always comes from ocfs2_journal_init()
  736. * and was validated there inside ocfs2_inode_lock_full(). It's a
  737. * code bug if we mess it up. */
  738. BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
  739. flags = le32_to_cpu(fe->id1.journal1.ij_flags);
  740. if (dirty)
  741. flags |= OCFS2_JOURNAL_DIRTY_FL;
  742. else
  743. flags &= ~OCFS2_JOURNAL_DIRTY_FL;
  744. fe->id1.journal1.ij_flags = cpu_to_le32(flags);
  745. if (replayed)
  746. ocfs2_bump_recovery_generation(fe);
  747. ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
  748. status = ocfs2_write_block(osb, bh, INODE_CACHE(journal->j_inode));
  749. if (status < 0)
  750. mlog_errno(status);
  751. mlog_exit(status);
  752. return status;
  753. }
  754. /*
  755. * If the journal has been kmalloc'd it needs to be freed after this
  756. * call.
  757. */
  758. void ocfs2_journal_shutdown(struct ocfs2_super *osb)
  759. {
  760. struct ocfs2_journal *journal = NULL;
  761. int status = 0;
  762. struct inode *inode = NULL;
  763. int num_running_trans = 0;
  764. mlog_entry_void();
  765. BUG_ON(!osb);
  766. journal = osb->journal;
  767. if (!journal)
  768. goto done;
  769. inode = journal->j_inode;
  770. if (journal->j_state != OCFS2_JOURNAL_LOADED)
  771. goto done;
  772. /* need to inc inode use count - jbd2_journal_destroy will iput. */
  773. if (!igrab(inode))
  774. BUG();
  775. num_running_trans = atomic_read(&(osb->journal->j_num_trans));
  776. if (num_running_trans > 0)
  777. mlog(0, "Shutting down journal: must wait on %d "
  778. "running transactions!\n",
  779. num_running_trans);
  780. /* Do a commit_cache here. It will flush our journal, *and*
  781. * release any locks that are still held.
  782. * Set the SHUTDOWN flag and release the trans lock.
  783. * The commit thread will take the trans lock for us below. */
  784. journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN;
  785. /* The OCFS2_JOURNAL_IN_SHUTDOWN will signal to commit_cache to not
  786. * drop the trans_lock (which we want to hold until we
  787. * completely destroy the journal). */
  788. if (osb->commit_task) {
  789. /* Wait for the commit thread */
  790. mlog(0, "Waiting for ocfs2commit to exit....\n");
  791. kthread_stop(osb->commit_task);
  792. osb->commit_task = NULL;
  793. }
  794. BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);
  795. if (ocfs2_mount_local(osb)) {
  796. jbd2_journal_lock_updates(journal->j_journal);
  797. status = jbd2_journal_flush(journal->j_journal);
  798. jbd2_journal_unlock_updates(journal->j_journal);
  799. if (status < 0)
  800. mlog_errno(status);
  801. }
  802. if (status == 0) {
  803. /*
  804. * Do not toggle if flush was unsuccessful otherwise
  805. * will leave dirty metadata in a "clean" journal
  806. */
  807. status = ocfs2_journal_toggle_dirty(osb, 0, 0);
  808. if (status < 0)
  809. mlog_errno(status);
  810. }
  811. /* Shutdown the kernel journal system */
  812. jbd2_journal_destroy(journal->j_journal);
  813. journal->j_journal = NULL;
  814. OCFS2_I(inode)->ip_open_count--;
  815. /* unlock our journal */
  816. ocfs2_inode_unlock(inode, 1);
  817. brelse(journal->j_bh);
  818. journal->j_bh = NULL;
  819. journal->j_state = OCFS2_JOURNAL_FREE;
  820. // up_write(&journal->j_trans_barrier);
  821. done:
  822. if (inode)
  823. iput(inode);
  824. mlog_exit_void();
  825. }
  826. static void ocfs2_clear_journal_error(struct super_block *sb,
  827. journal_t *journal,
  828. int slot)
  829. {
  830. int olderr;
  831. olderr = jbd2_journal_errno(journal);
  832. if (olderr) {
  833. mlog(ML_ERROR, "File system error %d recorded in "
  834. "journal %u.\n", olderr, slot);
  835. mlog(ML_ERROR, "File system on device %s needs checking.\n",
  836. sb->s_id);
  837. jbd2_journal_ack_err(journal);
  838. jbd2_journal_clear_err(journal);
  839. }
  840. }
  841. int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
  842. {
  843. int status = 0;
  844. struct ocfs2_super *osb;
  845. mlog_entry_void();
  846. BUG_ON(!journal);
  847. osb = journal->j_osb;
  848. status = jbd2_journal_load(journal->j_journal);
  849. if (status < 0) {
  850. mlog(ML_ERROR, "Failed to load journal!\n");
  851. goto done;
  852. }
  853. ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);
  854. status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
  855. if (status < 0) {
  856. mlog_errno(status);
  857. goto done;
  858. }
  859. /* Launch the commit thread */
  860. if (!local) {
  861. osb->commit_task = kthread_run(ocfs2_commit_thread, osb,
  862. "ocfs2cmt");
  863. if (IS_ERR(osb->commit_task)) {
  864. status = PTR_ERR(osb->commit_task);
  865. osb->commit_task = NULL;
  866. mlog(ML_ERROR, "unable to launch ocfs2commit thread, "
  867. "error=%d", status);
  868. goto done;
  869. }
  870. } else
  871. osb->commit_task = NULL;
  872. done:
  873. mlog_exit(status);
  874. return status;
  875. }
  876. /* 'full' flag tells us whether we clear out all blocks or if we just
  877. * mark the journal clean */
  878. int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
  879. {
  880. int status;
  881. mlog_entry_void();
  882. BUG_ON(!journal);
  883. status = jbd2_journal_wipe(journal->j_journal, full);
  884. if (status < 0) {
  885. mlog_errno(status);
  886. goto bail;
  887. }
  888. status = ocfs2_journal_toggle_dirty(journal->j_osb, 0, 0);
  889. if (status < 0)
  890. mlog_errno(status);
  891. bail:
  892. mlog_exit(status);
  893. return status;
  894. }
  895. static int ocfs2_recovery_completed(struct ocfs2_super *osb)
  896. {
  897. int empty;
  898. struct ocfs2_recovery_map *rm = osb->recovery_map;
  899. spin_lock(&osb->osb_lock);
  900. empty = (rm->rm_used == 0);
  901. spin_unlock(&osb->osb_lock);
  902. return empty;
  903. }
  904. void ocfs2_wait_for_recovery(struct ocfs2_super *osb)
  905. {
  906. wait_event(osb->recovery_event, ocfs2_recovery_completed(osb));
  907. }
  908. /*
  909. * JBD might read a cached version of another node's journal file. We
  910. * don't want this as this file changes often and we get no
  911. * notification on those changes. The only way to be sure that we've
  912. * got the most up to date version of those blocks is to force
  913. * read them off disk. Just searching through the buffer cache won't
  914. * work as there may be pages backing this file which are still marked
  915. * up to date. We know things can't change on this file underneath us
  916. * as we have the lock by now :)
  917. */
  918. static int ocfs2_force_read_journal(struct inode *inode)
  919. {
  920. int status = 0;
  921. int i;
  922. u64 v_blkno, p_blkno, p_blocks, num_blocks;
  923. #define CONCURRENT_JOURNAL_FILL 32ULL
  924. struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];
  925. mlog_entry_void();
  926. memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
  927. num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size);
  928. v_blkno = 0;
  929. while (v_blkno < num_blocks) {
  930. status = ocfs2_extent_map_get_blocks(inode, v_blkno,
  931. &p_blkno, &p_blocks, NULL);
  932. if (status < 0) {
  933. mlog_errno(status);
  934. goto bail;
  935. }
  936. if (p_blocks > CONCURRENT_JOURNAL_FILL)
  937. p_blocks = CONCURRENT_JOURNAL_FILL;
  938. /* We are reading journal data which should not
  939. * be put in the uptodate cache */
  940. status = ocfs2_read_blocks_sync(OCFS2_SB(inode->i_sb),
  941. p_blkno, p_blocks, bhs);
  942. if (status < 0) {
  943. mlog_errno(status);
  944. goto bail;
  945. }
  946. for(i = 0; i < p_blocks; i++) {
  947. brelse(bhs[i]);
  948. bhs[i] = NULL;
  949. }
  950. v_blkno += p_blocks;
  951. }
  952. bail:
  953. for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++)
  954. brelse(bhs[i]);
  955. mlog_exit(status);
  956. return status;
  957. }
  958. struct ocfs2_la_recovery_item {
  959. struct list_head lri_list;
  960. int lri_slot;
  961. struct ocfs2_dinode *lri_la_dinode;
  962. struct ocfs2_dinode *lri_tl_dinode;
  963. struct ocfs2_quota_recovery *lri_qrec;
  964. };
  965. /* Does the second half of the recovery process. By this point, the
  966. * node is marked clean and can actually be considered recovered,
  967. * hence it's no longer in the recovery map, but there's still some
  968. * cleanup we can do which shouldn't happen within the recovery thread
  969. * as locking in that context becomes very difficult if we are to take
  970. * recovering nodes into account.
  971. *
  972. * NOTE: This function can and will sleep on recovery of other nodes
  973. * during cluster locking, just like any other ocfs2 process.
  974. */
  975. void ocfs2_complete_recovery(struct work_struct *work)
  976. {
  977. int ret;
  978. struct ocfs2_journal *journal =
  979. container_of(work, struct ocfs2_journal, j_recovery_work);
  980. struct ocfs2_super *osb = journal->j_osb;
  981. struct ocfs2_dinode *la_dinode, *tl_dinode;
  982. struct ocfs2_la_recovery_item *item, *n;
  983. struct ocfs2_quota_recovery *qrec;
  984. LIST_HEAD(tmp_la_list);
  985. mlog_entry_void();
  986. mlog(0, "completing recovery from keventd\n");
  987. spin_lock(&journal->j_lock);
  988. list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
  989. spin_unlock(&journal->j_lock);
  990. list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) {
  991. list_del_init(&item->lri_list);
  992. mlog(0, "Complete recovery for slot %d\n", item->lri_slot);
  993. ocfs2_wait_on_quotas(osb);
  994. la_dinode = item->lri_la_dinode;
  995. if (la_dinode) {
  996. mlog(0, "Clean up local alloc %llu\n",
  997. (unsigned long long)le64_to_cpu(la_dinode->i_blkno));
  998. ret = ocfs2_complete_local_alloc_recovery(osb,
  999. la_dinode);
  1000. if (ret < 0)
  1001. mlog_errno(ret);
  1002. kfree(la_dinode);
  1003. }
  1004. tl_dinode = item->lri_tl_dinode;
  1005. if (tl_dinode) {
  1006. mlog(0, "Clean up truncate log %llu\n",
  1007. (unsigned long long)le64_to_cpu(tl_dinode->i_blkno));
  1008. ret = ocfs2_complete_truncate_log_recovery(osb,
  1009. tl_dinode);
  1010. if (ret < 0)
  1011. mlog_errno(ret);
  1012. kfree(tl_dinode);
  1013. }
  1014. ret = ocfs2_recover_orphans(osb, item->lri_slot);
  1015. if (ret < 0)
  1016. mlog_errno(ret);
  1017. qrec = item->lri_qrec;
  1018. if (qrec) {
  1019. mlog(0, "Recovering quota files");
  1020. ret = ocfs2_finish_quota_recovery(osb, qrec,
  1021. item->lri_slot);
  1022. if (ret < 0)
  1023. mlog_errno(ret);
  1024. /* Recovery info is already freed now */
  1025. }
  1026. kfree(item);
  1027. }
  1028. mlog(0, "Recovery completion\n");
  1029. mlog_exit_void();
  1030. }
  1031. /* NOTE: This function always eats your references to la_dinode and
  1032. * tl_dinode, either manually on error, or by passing them to
  1033. * ocfs2_complete_recovery */
  1034. static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
  1035. int slot_num,
  1036. struct ocfs2_dinode *la_dinode,
  1037. struct ocfs2_dinode *tl_dinode,
  1038. struct ocfs2_quota_recovery *qrec)
  1039. {
  1040. struct ocfs2_la_recovery_item *item;
  1041. item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
  1042. if (!item) {
  1043. /* Though we wish to avoid it, we are in fact safe in
  1044. * skipping local alloc cleanup as fsck.ocfs2 is more
  1045. * than capable of reclaiming unused space. */
  1046. if (la_dinode)
  1047. kfree(la_dinode);
  1048. if (tl_dinode)
  1049. kfree(tl_dinode);
  1050. if (qrec)
  1051. ocfs2_free_quota_recovery(qrec);
  1052. mlog_errno(-ENOMEM);
  1053. return;
  1054. }
  1055. INIT_LIST_HEAD(&item->lri_list);
  1056. item->lri_la_dinode = la_dinode;
  1057. item->lri_slot = slot_num;
  1058. item->lri_tl_dinode = tl_dinode;
  1059. item->lri_qrec = qrec;
  1060. spin_lock(&journal->j_lock);
  1061. list_add_tail(&item->lri_list, &journal->j_la_cleanups);
  1062. queue_work(ocfs2_wq, &journal->j_recovery_work);
  1063. spin_unlock(&journal->j_lock);
  1064. }
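/*
 * ocfs2_queue_recovery_completion() and ocfs2_complete_recovery() form a
 * producer/consumer pair: items are appended to journal->j_la_cleanups
 * under j_lock, and j_recovery_work (run from the ocfs2_wq workqueue)
 * drains them in a context where cluster locking is safe.
 */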
  1065. /* Called by the mount code to queue the last part of recovery
  1066. * for its own slot and for any offline slot(s). */
  1067. void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
  1068. {
  1069. struct ocfs2_journal *journal = osb->journal;
  1070. /* No need to queue up our truncate_log as regular cleanup will catch
  1071. * that */
  1072. ocfs2_queue_recovery_completion(journal, osb->slot_num,
  1073. osb->local_alloc_copy, NULL, NULL);
  1074. ocfs2_schedule_truncate_log_flush(osb, 0);
  1075. osb->local_alloc_copy = NULL;
  1076. osb->dirty = 0;
  1077. /* queue to recover orphan slots for all offline slots */
  1078. ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);
  1079. ocfs2_queue_replay_slots(osb);
  1080. ocfs2_free_replay_slots(osb);
  1081. }
  1082. void ocfs2_complete_quota_recovery(struct ocfs2_super *osb)
  1083. {
  1084. if (osb->quota_rec) {
  1085. ocfs2_queue_recovery_completion(osb->journal,
  1086. osb->slot_num,
  1087. NULL,
  1088. NULL,
  1089. osb->quota_rec);
  1090. osb->quota_rec = NULL;
  1091. }
  1092. }
  1093. static int __ocfs2_recovery_thread(void *arg)
  1094. {
  1095. int status, node_num, slot_num;
  1096. struct ocfs2_super *osb = arg;
  1097. struct ocfs2_recovery_map *rm = osb->recovery_map;
  1098. int *rm_quota = NULL;
  1099. int rm_quota_used = 0, i;
  1100. struct ocfs2_quota_recovery *qrec;
  1101. mlog_entry_void();
  1102. status = ocfs2_wait_on_mount(osb);
  1103. if (status < 0) {
  1104. goto bail;
  1105. }
  1106. rm_quota = kzalloc(osb->max_slots * sizeof(int), GFP_NOFS);
  1107. if (!rm_quota) {
  1108. status = -ENOMEM;
  1109. goto bail;
  1110. }
  1111. restart:
  1112. status = ocfs2_super_lock(osb, 1);
  1113. if (status < 0) {
  1114. mlog_errno(status);
  1115. goto bail;
  1116. }
  1117. status = ocfs2_compute_replay_slots(osb);
  1118. if (status < 0)
  1119. mlog_errno(status);
  1120. /* queue recovery for our own slot */
  1121. ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL,
  1122. NULL, NULL);
  1123. spin_lock(&osb->osb_lock);
  1124. while (rm->rm_used) {
  1125. /* It's always safe to remove entry zero, as we won't
  1126. * clear it until ocfs2_recover_node() has succeeded. */
  1127. node_num = rm->rm_entries[0];
  1128. spin_unlock(&osb->osb_lock);
  1129. mlog(0, "checking node %d\n", node_num);
  1130. slot_num = ocfs2_node_num_to_slot(osb, node_num);
  1131. if (slot_num == -ENOENT) {
  1132. status = 0;
  1133. mlog(0, "no slot for this node, so no recovery"
  1134. "required.\n");
  1135. goto skip_recovery;
  1136. }
  1137. mlog(0, "node %d was using slot %d\n", node_num, slot_num);
  1138. /* Quota recovery is a bit subtle. We cannot do it
  1139. * immediately because we have to obtain cluster locks from
  1140. * quota files and we also don't want to just skip it because
  1141. * then quota usage would be out of sync until some node takes
  1142. * the slot. So we remember which nodes need quota recovery
  1143. * and when everything else is done, we recover quotas. */
  1144. for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++);
  1145. if (i == rm_quota_used)
  1146. rm_quota[rm_quota_used++] = slot_num;
  1147. status = ocfs2_recover_node(osb, node_num, slot_num);
  1148. skip_recovery:
  1149. if (!status) {
  1150. ocfs2_recovery_map_clear(osb, node_num);
  1151. } else {
  1152. mlog(ML_ERROR,
  1153. "Error %d recovering node %d on device (%u,%u)!\n",
  1154. status, node_num,
  1155. MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
  1156. mlog(ML_ERROR, "Volume requires unmount.\n");
  1157. }
  1158. spin_lock(&osb->osb_lock);
  1159. }
  1160. spin_unlock(&osb->osb_lock);
  1161. mlog(0, "All nodes recovered\n");
  1162. /* Refresh all journal recovery generations from disk */
  1163. status = ocfs2_check_journals_nolocks(osb);
  1164. status = (status == -EROFS) ? 0 : status;
  1165. if (status < 0)
  1166. mlog_errno(status);
  1167. /* Now is the right time to recover quotas... We have to do this under
  1168. * the superblock lock so that no one can start using the slot (and crash)
  1169. * before we recover it */
  1170. for (i = 0; i < rm_quota_used; i++) {
  1171. qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
  1172. if (IS_ERR(qrec)) {
  1173. status = PTR_ERR(qrec);
  1174. mlog_errno(status);
  1175. continue;
  1176. }
  1177. ocfs2_queue_recovery_completion(osb->journal, rm_quota[i],
  1178. NULL, NULL, qrec);
  1179. }
  1180. ocfs2_super_unlock(osb, 1);
  1181. /* queue recovery for offline slots */
  1182. ocfs2_queue_replay_slots(osb);
  1183. bail:
  1184. mutex_lock(&osb->recovery_lock);
  1185. if (!status && !ocfs2_recovery_completed(osb)) {
  1186. mutex_unlock(&osb->recovery_lock);
  1187. goto restart;
  1188. }
  1189. ocfs2_free_replay_slots(osb);
  1190. osb->recovery_thread_task = NULL;
  1191. mb(); /* sync with ocfs2_recovery_thread_running */
  1192. wake_up(&osb->recovery_event);
  1193. mutex_unlock(&osb->recovery_lock);
  1194. if (rm_quota)
  1195. kfree(rm_quota);
  1196. mlog_exit(status);
  1197. /* no one is calling kthread_stop() for us so the kthread() api
  1198. * requires that we call do_exit(). And it isn't exported, but
  1199. * complete_and_exit() seems to be a minimal wrapper around it. */
  1200. complete_and_exit(NULL, status);
  1201. return status;
  1202. }
  1203. void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
  1204. {
  1205. mlog_entry("(node_num=%d, osb->node_num = %d)\n",
  1206. node_num, osb->node_num);
  1207. mutex_lock(&osb->recovery_lock);
  1208. if (osb->disable_recovery)
  1209. goto out;
  1210. /* People waiting on recovery will wait on
  1211. * the recovery map to empty. */
  1212. if (ocfs2_recovery_map_set(osb, node_num))
  1213. mlog(0, "node %d already in recovery map.\n", node_num);
  1214. mlog(0, "starting recovery thread...\n");
  1215. if (osb->recovery_thread_task)
  1216. goto out;
  1217. osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb,
  1218. "ocfs2rec");
  1219. if (IS_ERR(osb->recovery_thread_task)) {
  1220. mlog_errno((int)PTR_ERR(osb->recovery_thread_task));
  1221. osb->recovery_thread_task = NULL;
  1222. }
  1223. out:
  1224. mutex_unlock(&osb->recovery_lock);
  1225. wake_up(&osb->recovery_event);
  1226. mlog_exit_void();
  1227. }
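/*
 * A minimal sketch of how recovery is kicked off. The call sites live
 * outside this file; the assumption here is a cluster-down notification
 * for node 'node_num':
 *
 *	ocfs2_recovery_thread(osb, node_num);
 *	...
 *	ocfs2_wait_for_recovery(osb);    for anyone who must wait until the
 *	                                 recovery map drains
 */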
  1228. static int ocfs2_read_journal_inode(struct ocfs2_super *osb,
  1229. int slot_num,
  1230. struct buffer_head **bh,
  1231. struct inode **ret_inode)
  1232. {
  1233. int status = -EACCES;
  1234. struct inode *inode = NULL;
  1235. BUG_ON(slot_num >= osb->max_slots);
  1236. inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
  1237. slot_num);
  1238. if (!inode || is_bad_inode(inode)) {
  1239. mlog_errno(status);
  1240. goto bail;
  1241. }
  1242. SET_INODE_JOURNAL(inode);
  1243. status = ocfs2_read_inode_block_full(inode, bh, OCFS2_BH_IGNORE_CACHE);
  1244. if (status < 0) {
  1245. mlog_errno(status);
  1246. goto bail;
  1247. }
  1248. status = 0;
  1249. bail:
  1250. if (inode) {
  1251. if (status || !ret_inode)
  1252. iput(inode);
  1253. else
  1254. *ret_inode = inode;
  1255. }
  1256. return status;
  1257. }
/* Does the actual journal replay and marks the journal inode as
 * clean. Will only replay if the journal inode is marked dirty. */
static int ocfs2_replay_journal(struct ocfs2_super *osb,
				int node_num,
				int slot_num)
{
	int status;
	int got_lock = 0;
	unsigned int flags;
	struct inode *inode = NULL;
	struct ocfs2_dinode *fe;
	journal_t *journal = NULL;
	struct buffer_head *bh = NULL;
	u32 slot_reco_gen;

	status = ocfs2_read_journal_inode(osb, slot_num, &bh, &inode);
	if (status) {
		mlog_errno(status);
		goto done;
	}

	fe = (struct ocfs2_dinode *)bh->b_data;
	slot_reco_gen = ocfs2_get_recovery_generation(fe);
	brelse(bh);
	bh = NULL;

	/*
	 * As the fs recovery is asynchronous, there is a small chance that
	 * another node mounted (and recovered) the slot before the recovery
	 * thread could get the lock. To handle that, we dirty read the journal
	 * inode for that slot to get the recovery generation. If it is
	 * different from what we expected, the slot has been recovered.
	 * If not, it needs recovery.
	 */
	if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) {
		mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num,
		     osb->slot_recovery_generations[slot_num], slot_reco_gen);
		osb->slot_recovery_generations[slot_num] = slot_reco_gen;
		status = -EBUSY;
		goto done;
	}

	/* Continue with recovery as the journal has not yet been recovered */
	status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
	if (status < 0) {
		mlog(0, "status returned from ocfs2_inode_lock=%d\n", status);
		if (status != -ERESTARTSYS)
			mlog(ML_ERROR, "Could not lock journal!\n");
		goto done;
	}
	got_lock = 1;

	fe = (struct ocfs2_dinode *) bh->b_data;

	flags = le32_to_cpu(fe->id1.journal1.ij_flags);
	slot_reco_gen = ocfs2_get_recovery_generation(fe);

	if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
		mlog(0, "No recovery required for node %d\n", node_num);
		/* Refresh recovery generation for the slot */
		osb->slot_recovery_generations[slot_num] = slot_reco_gen;
		goto done;
	}

	/* we need to run complete recovery for offline orphan slots */
	ocfs2_replay_map_set_state(osb, REPLAY_NEEDED);

	mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n",
	     node_num, slot_num,
	     MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));

	OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);

	status = ocfs2_force_read_journal(inode);
	if (status < 0) {
		mlog_errno(status);
		goto done;
	}

	mlog(0, "calling journal_init_inode\n");
	journal = jbd2_journal_init_inode(inode);
	if (journal == NULL) {
		mlog(ML_ERROR, "Linux journal layer error\n");
		status = -EIO;
		goto done;
	}

	status = jbd2_journal_load(journal);
	if (status < 0) {
		mlog_errno(status);
		if (!igrab(inode))
			BUG();
		jbd2_journal_destroy(journal);
		goto done;
	}

	ocfs2_clear_journal_error(osb->sb, journal, slot_num);

	/* wipe the journal */
	mlog(0, "flushing the journal.\n");
	jbd2_journal_lock_updates(journal);
	status = jbd2_journal_flush(journal);
	jbd2_journal_unlock_updates(journal);
	if (status < 0)
		mlog_errno(status);

	/* This will mark the node clean */
	flags = le32_to_cpu(fe->id1.journal1.ij_flags);
	flags &= ~OCFS2_JOURNAL_DIRTY_FL;
	fe->id1.journal1.ij_flags = cpu_to_le32(flags);

	/* Increment recovery generation to indicate successful recovery */
	ocfs2_bump_recovery_generation(fe);
	osb->slot_recovery_generations[slot_num] =
		ocfs2_get_recovery_generation(fe);

	ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check);
	status = ocfs2_write_block(osb, bh, INODE_CACHE(inode));
	if (status < 0)
		mlog_errno(status);

	if (!igrab(inode))
		BUG();

	jbd2_journal_destroy(journal);

done:
	/* drop the lock on this node's journal */
	if (got_lock)
		ocfs2_inode_unlock(inode, 1);

	if (inode)
		iput(inode);

	brelse(bh);

	mlog_exit(status);
	return status;
}
/*
 * Do the most important parts of node recovery:
 *  - Replay its journal
 *  - Stamp a clean local allocator file
 *  - Stamp a clean truncate log
 *  - Mark the node clean
 *
 * If this function completes without error, a node in OCFS2 can be
 * said to have been safely recovered. As a result, failure during the
 * second part of a node's recovery process (local alloc recovery) is
 * far less concerning.
 */
static int ocfs2_recover_node(struct ocfs2_super *osb,
			      int node_num, int slot_num)
{
	int status = 0;
	struct ocfs2_dinode *la_copy = NULL;
	struct ocfs2_dinode *tl_copy = NULL;

	mlog_entry("(node_num=%d, slot_num=%d, osb->node_num = %d)\n",
		   node_num, slot_num, osb->node_num);

	/* Should not ever be called to recover ourselves -- in that
	 * case we should've called ocfs2_journal_load instead. */
	BUG_ON(osb->node_num == node_num);

	status = ocfs2_replay_journal(osb, node_num, slot_num);
	if (status < 0) {
		if (status == -EBUSY) {
			mlog(0, "Skipping recovery for slot %u (node %u) "
			     "as another node has recovered it\n", slot_num,
			     node_num);
			status = 0;
			goto done;
		}
		mlog_errno(status);
		goto done;
	}

	/* Stamp a clean local alloc file AFTER recovering the journal... */
	status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy);
	if (status < 0) {
		mlog_errno(status);
		goto done;
	}

	/* An error from begin_truncate_log_recovery is not
	 * serious enough to warrant halting the rest of
	 * recovery. */
	status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy);
	if (status < 0)
		mlog_errno(status);

	/* Likewise, this would be a strange but ultimately not so
	 * harmful place to get an error... */
	status = ocfs2_clear_slot(osb, slot_num);
	if (status < 0)
		mlog_errno(status);

	/* This will kfree the memory pointed to by la_copy and tl_copy */
	ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
					tl_copy, NULL);

	status = 0;
done:
	mlog_exit(status);
	return status;
}
/* Test node liveness by trylocking its journal. If we get the lock,
 * we drop it here. Return 0 if we got the lock, -EAGAIN if the node is
 * still alive (we couldn't get the lock) and < 0 on error. */
static int ocfs2_trylock_journal(struct ocfs2_super *osb,
				 int slot_num)
{
	int status, flags;
	struct inode *inode = NULL;

	inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
					    slot_num);
	if (inode == NULL) {
		mlog(ML_ERROR, "access error\n");
		status = -EACCES;
		goto bail;
	}
	if (is_bad_inode(inode)) {
		mlog(ML_ERROR, "access error (bad inode)\n");
		iput(inode);
		inode = NULL;
		status = -EACCES;
		goto bail;
	}
	SET_INODE_JOURNAL(inode);

	flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE;
	status = ocfs2_inode_lock_full(inode, NULL, 1, flags);
	if (status < 0) {
		if (status != -EAGAIN)
			mlog_errno(status);
		goto bail;
	}

	ocfs2_inode_unlock(inode, 1);
bail:
	if (inode)
		iput(inode);

	return status;
}
/* Call this underneath ocfs2_super_lock. It also assumes that the
 * slot info struct has been updated from disk. */
int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
{
	unsigned int node_num;
	int status, i;
	u32 gen;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *di;

	/* This is called with the super block cluster lock, so we
	 * know that the slot map can't change underneath us. */

	for (i = 0; i < osb->max_slots; i++) {
		/* Read journal inode to get the recovery generation */
		status = ocfs2_read_journal_inode(osb, i, &bh, NULL);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
		di = (struct ocfs2_dinode *)bh->b_data;
		gen = ocfs2_get_recovery_generation(di);
		brelse(bh);
		bh = NULL;

		spin_lock(&osb->osb_lock);
		osb->slot_recovery_generations[i] = gen;

		mlog(0, "Slot %u recovery generation is %u\n", i,
		     osb->slot_recovery_generations[i]);

		if (i == osb->slot_num) {
			spin_unlock(&osb->osb_lock);
			continue;
		}

		status = ocfs2_slot_to_node_num_locked(osb, i, &node_num);
		if (status == -ENOENT) {
			spin_unlock(&osb->osb_lock);
			continue;
		}

		if (__ocfs2_recovery_map_test(osb, node_num)) {
			spin_unlock(&osb->osb_lock);
			continue;
		}
		spin_unlock(&osb->osb_lock);

		/* Ok, we have a slot occupied by another node which
		 * is not in the recovery map. We trylock its journal
		 * file here to test if it's alive. */
		status = ocfs2_trylock_journal(osb, i);
		if (!status) {
			/* Since we're called from mount, we know that
			 * the recovery thread can't race us on
			 * setting / checking the recovery bits. */
			ocfs2_recovery_thread(osb, node_num);
		} else if ((status < 0) && (status != -EAGAIN)) {
			mlog_errno(status);
			goto bail;
		}
	}

	status = 0;
bail:
	mlog_exit(status);
	return status;
}
/*
 * Scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some
 * randomness to the timeout to minimize multiple nodes firing the timer at
 * the same time.
 */
static inline unsigned long ocfs2_orphan_scan_timeout(void)
{
	unsigned long time;

	get_random_bytes(&time, sizeof(time));
	time = ORPHAN_SCAN_SCHEDULE_TIMEOUT + (time % 5000);
	return msecs_to_jiffies(time);
}
/*
 * ocfs2_queue_orphan_scan calls ocfs2_queue_recovery_completion for
 * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This
 * is done to catch any orphans that are left over in orphan directories.
 *
 * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT
 * milliseconds. It gets an EX lock on os_lockres and checks the sequence
 * number stored in the LVB. If the sequence number has changed, it means
 * some other node has done the scan. This node skips the scan and tracks
 * the sequence number. If the sequence number didn't change, it means a
 * scan hasn't happened. The node queues a scan and increments the
 * sequence number in the LVB.
 */
void ocfs2_queue_orphan_scan(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;
	int status, i;
	u32 seqno = 0;

	os = &osb->osb_orphan_scan;

	if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
		goto out;

	status = ocfs2_orphan_scan_lock(osb, &seqno);
	if (status < 0) {
		if (status != -EAGAIN)
			mlog_errno(status);
		goto out;
	}

	/* Do not queue the tasks if the volume is being unmounted */
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
		goto unlock;

	if (os->os_seqno != seqno) {
		os->os_seqno = seqno;
		goto unlock;
	}

	for (i = 0; i < osb->max_slots; i++)
		ocfs2_queue_recovery_completion(osb->journal, i, NULL, NULL,
						NULL);
	/*
	 * We queued a recovery on the orphan slots, increment the sequence
	 * number and update the LVB so other nodes will skip the scan for
	 * a while.
	 */
	seqno++;
	os->os_count++;
	os->os_scantime = CURRENT_TIME;
unlock:
	ocfs2_orphan_scan_unlock(osb, seqno);
out:
	return;
}
/* Worker task that gets fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT milliseconds */
void ocfs2_orphan_scan_work(struct work_struct *work)
{
	struct ocfs2_orphan_scan *os;
	struct ocfs2_super *osb;

	os = container_of(work, struct ocfs2_orphan_scan,
			  os_orphan_scan_work.work);
	osb = os->os_osb;

	mutex_lock(&os->os_lock);
	ocfs2_queue_orphan_scan(osb);
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
		schedule_delayed_work(&os->os_orphan_scan_work,
				      ocfs2_orphan_scan_timeout());
	mutex_unlock(&os->os_lock);
}
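
/*
 * Shut down the periodic orphan scan: mark the scan inactive and cancel
 * any delayed work still queued. Taking os_lock ensures we do not race
 * with a scan currently running in the work handler above.
 */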
void ocfs2_orphan_scan_stop(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;

	os = &osb->osb_orphan_scan;
	if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) {
		atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
		mutex_lock(&os->os_lock);
		cancel_delayed_work(&os->os_orphan_scan_work);
		mutex_unlock(&os->os_lock);
	}
}
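
/* Set up the orphan scan state at mount time; the delayed work is only
 * armed later by ocfs2_orphan_scan_start(). */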
void ocfs2_orphan_scan_init(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;

	os = &osb->osb_orphan_scan;
	os->os_osb = osb;
	os->os_count = 0;
	os->os_seqno = 0;
	mutex_init(&os->os_lock);
	INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work);
}
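
/*
 * Arm the periodic orphan scan. Hard-readonly and locally mounted
 * volumes never need it, so the scan is left inactive for them;
 * otherwise the first delayed scan is scheduled with a randomized
 * timeout (see ocfs2_orphan_scan_timeout()).
 */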
void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
{
	struct ocfs2_orphan_scan *os;

	os = &osb->osb_orphan_scan;
	os->os_scantime = CURRENT_TIME;
	if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
		atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
	else {
		atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE);
		schedule_delayed_work(&os->os_orphan_scan_work,
				      ocfs2_orphan_scan_timeout());
	}
}
struct ocfs2_orphan_filldir_priv {
	struct inode		*head;
	struct ocfs2_super	*osb;
};
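
/*
 * Directory iteration callback used by ocfs2_queue_orphans(): for each
 * entry in the orphan dir other than "." and "..", take a reference on
 * the inode and link it onto the singly linked list rooted at p->head
 * via ip_next_orphan. Bad inodes are skipped so recovery can continue.
 */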
static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len,
				loff_t pos, u64 ino, unsigned type)
{
	struct ocfs2_orphan_filldir_priv *p = priv;
	struct inode *iter;

	if (name_len == 1 && !strncmp(".", name, 1))
		return 0;
	if (name_len == 2 && !strncmp("..", name, 2))
		return 0;

	/* Skip bad inodes so that recovery can continue */
	iter = ocfs2_iget(p->osb, ino,
			  OCFS2_FI_FLAG_ORPHAN_RECOVERY, 0);
	if (IS_ERR(iter))
		return 0;

	mlog(0, "queue orphan %llu\n",
	     (unsigned long long)OCFS2_I(iter)->ip_blkno);
	/* No locking is required for the next_orphan queue as there
	 * is only ever a single process doing orphan recovery. */
	OCFS2_I(iter)->ip_next_orphan = p->head;
	p->head = iter;

	return 0;
}
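
/*
 * Walk the orphan directory for the given slot under its cluster lock
 * and i_mutex, handing each entry to ocfs2_orphan_filldir() to build
 * the list of orphaned inodes returned through *head.
 */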
static int ocfs2_queue_orphans(struct ocfs2_super *osb,
			       int slot,
			       struct inode **head)
{
	int status;
	struct inode *orphan_dir_inode = NULL;
	struct ocfs2_orphan_filldir_priv priv;
	loff_t pos = 0;

	priv.osb = osb;
	priv.head = *head;

	orphan_dir_inode = ocfs2_get_system_file_inode(osb,
						       ORPHAN_DIR_SYSTEM_INODE,
						       slot);
	if (!orphan_dir_inode) {
		status = -ENOENT;
		mlog_errno(status);
		return status;
	}

	mutex_lock(&orphan_dir_inode->i_mutex);
	status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv,
				   ocfs2_orphan_filldir);
	if (status) {
		mlog_errno(status);
		goto out_cluster;
	}

	*head = priv.head;

out_cluster:
	ocfs2_inode_unlock(orphan_dir_inode, 0);
out:
	mutex_unlock(&orphan_dir_inode->i_mutex);
	iput(orphan_dir_inode);
	return status;
}
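
/* Returns true once no process is in the middle of an orphan wipe on
 * this slot's orphan dir, i.e. recovery of that dir may proceed. */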
static int ocfs2_orphan_recovery_can_continue(struct ocfs2_super *osb,
					      int slot)
{
	int ret;

	spin_lock(&osb->osb_lock);
	ret = !osb->osb_orphan_wipes[slot];
	spin_unlock(&osb->osb_lock);
	return ret;
}
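
/*
 * Advertise that we are recovering this slot's orphan dir so that
 * ocfs2_delete_inode() quits early, then wait for any orphan wipes
 * already in flight on the dir to drain before returning.
 */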
static void ocfs2_mark_recovering_orphan_dir(struct ocfs2_super *osb,
					     int slot)
{
	spin_lock(&osb->osb_lock);
	/* Mark ourselves such that new processes in delete_inode()
	 * know to quit early. */
	ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
	while (osb->osb_orphan_wipes[slot]) {
		/* If any processes are already in the middle of an
		 * orphan wipe on this dir, then we need to wait for
		 * them. */
		spin_unlock(&osb->osb_lock);
		wait_event_interruptible(osb->osb_wipe_event,
					 ocfs2_orphan_recovery_can_continue(osb, slot));
		spin_lock(&osb->osb_lock);
	}
	spin_unlock(&osb->osb_lock);
}
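
/* Clear the "recovering this orphan dir" bit set by
 * ocfs2_mark_recovering_orphan_dir(). */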
static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb,
					      int slot)
{
	ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, slot);
}
/*
 * Orphan recovery. Each mounted node has its own orphan dir which we
 * must run during recovery. Our strategy here is to build a list of
 * the inodes in the orphan dir and iget/iput them. The VFS does most
 * of the rest of the work.
 *
 * Orphan recovery can happen at any time, not just mount, so we have a
 * couple of extra considerations.
 *
 * - We grab as many inodes as we can under the orphan dir lock -
 *   doing iget() outside the orphan dir risks getting a reference on
 *   an invalid inode.
 * - We must be sure not to deadlock with other processes on the
 *   system wanting to run delete_inode(). This can happen when they go
 *   to lock the orphan dir and the orphan recovery process attempts to
 *   iget() inside the orphan dir lock. This can be avoided by
 *   advertising our state to ocfs2_delete_inode().
 */
static int ocfs2_recover_orphans(struct ocfs2_super *osb,
				 int slot)
{
	int ret = 0;
	struct inode *inode = NULL;
	struct inode *iter;
	struct ocfs2_inode_info *oi;

	mlog(0, "Recover inodes from orphan dir in slot %d\n", slot);

	ocfs2_mark_recovering_orphan_dir(osb, slot);
	ret = ocfs2_queue_orphans(osb, slot, &inode);
	ocfs2_clear_recovering_orphan_dir(osb, slot);

	/* Error here should be noted, but we want to continue with as
	 * many queued inodes as we've got. */
	if (ret)
		mlog_errno(ret);

	while (inode) {
		oi = OCFS2_I(inode);
		mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno);

		iter = oi->ip_next_orphan;

		spin_lock(&oi->ip_lock);
		/* The remote delete code may have set these on the
		 * assumption that the other node would wipe them
		 * successfully. If they are still in the node's
		 * orphan dir, we need to reset that state. */
		oi->ip_flags &= ~(OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE);

		/* Set the proper information to get us going into
		 * ocfs2_delete_inode. */
		oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
		spin_unlock(&oi->ip_lock);

		iput(inode);

		inode = iter;
	}

	return ret;
}
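
/*
 * Block until the volume has finished (or failed) mounting. When
 * @quota is set we also wait for quotas to be enabled
 * (VOLUME_MOUNTED_QUOTAS). Returns -EBUSY if the mount was aborted
 * (VOLUME_DISABLED), 0 otherwise.
 */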
static int __ocfs2_wait_on_mount(struct ocfs2_super *osb, int quota)
{
	/* This check is good because ocfs2 will wait on our recovery
	 * thread before changing it to something other than MOUNTED
	 * or DISABLED. */
	wait_event(osb->osb_mount_event,
		   (!quota && atomic_read(&osb->vol_state) == VOLUME_MOUNTED) ||
		   atomic_read(&osb->vol_state) == VOLUME_MOUNTED_QUOTAS ||
		   atomic_read(&osb->vol_state) == VOLUME_DISABLED);

	/* If there's an error on mount, then we may never get to the
	 * MOUNTED flag, but this is set right before
	 * dismount_volume() so we can trust it. */
	if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
		mlog(0, "mount error, exiting!\n");
		return -EBUSY;
	}

	return 0;
}
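
/*
 * Main loop of the journal commit kthread: sleep until a transaction
 * is pending or we are asked to stop, then checkpoint the journal via
 * ocfs2_commit_cache(). On shutdown we keep iterating until no
 * transactions remain outstanding.
 */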
static int ocfs2_commit_thread(void *arg)
{
	int status;
	struct ocfs2_super *osb = arg;
	struct ocfs2_journal *journal = osb->journal;

	/* we can trust j_num_trans here because _should_stop() is only set in
	 * shutdown and nobody other than ourselves should be able to start
	 * transactions. committing on shutdown might take a few iterations
	 * as final transactions put deleted inodes on the list */
	while (!(kthread_should_stop() &&
		 atomic_read(&journal->j_num_trans) == 0)) {

		wait_event_interruptible(osb->checkpoint_event,
					 atomic_read(&journal->j_num_trans)
					 || kthread_should_stop());

		status = ocfs2_commit_cache(osb);
		if (status < 0)
			mlog_errno(status);

		if (kthread_should_stop() && atomic_read(&journal->j_num_trans)) {
			mlog(ML_KTHREAD,
			     "commit_thread: %u transactions pending on "
			     "shutdown\n",
			     atomic_read(&journal->j_num_trans));
		}
	}

	return 0;
}
/* Reads all the journal inodes without taking any cluster locks. Used
 * for hard readonly access to determine whether any journal requires
 * recovery. Also used to refresh the recovery generation numbers after
 * a journal has been recovered by another node.
 */
int ocfs2_check_journals_nolocks(struct ocfs2_super *osb)
{
	int ret = 0;
	unsigned int slot;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	int journal_dirty = 0;

	for (slot = 0; slot < osb->max_slots; slot++) {
		ret = ocfs2_read_journal_inode(osb, slot, &di_bh, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		di = (struct ocfs2_dinode *) di_bh->b_data;

		osb->slot_recovery_generations[slot] =
			ocfs2_get_recovery_generation(di);

		if (le32_to_cpu(di->id1.journal1.ij_flags) &
		    OCFS2_JOURNAL_DIRTY_FL)
			journal_dirty = 1;

		brelse(di_bh);
		di_bh = NULL;
	}

out:
	if (journal_dirty)
		ret = -EROFS;
	return ret;
}