/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
static void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);

static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_do_master_requery(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 u8 nodenum, u8 *real_master);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);

static u64 dlm_get_next_mig_cookie(void);

static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED;
static u64 dlm_mig_cookie = 1;
static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}
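
/* Note: cookies handed out here start at 1 and wrap back to 1, never 0.
 * dlm_send_one_lockres() below passes a zero cookie for the common case
 * where a lockres fits in a single network message, so (presumably by
 * design) a nonzero mig_cookie is what ties together the several messages
 * of a large, multi-packet lockres migration. */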
static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
	dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
	spin_unlock(&dlm->spinlock);
}
/* Worker function used during recovery. */
void dlm_dispatch_work(void *data)
{
	struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
	LIST_HEAD(tmp_list);
	struct list_head *iter, *iter2;
	struct dlm_work_item *item;
	dlm_workfunc_t *workfunc;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_safe(iter, iter2, &tmp_list) {
		item = list_entry(iter, struct dlm_work_item, list);
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);
		dlm_put(dlm);
		kfree(item);
	}
}
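
/* The splice above is the standard trick for draining a shared work list:
 * hold the lock just long enough to move the whole list onto a private
 * head, then run the (possibly blocking) work functions with no lock held.
 * Items queued concurrently simply land on the now-empty dlm->work_list
 * and are picked up by the next invocation. */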
/*
 * RECOVERY THREAD
 */

static void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */
	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco_thread");
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}
/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of the secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, it unlocks
 *    everything and recovery for this dead node is done
 * 10) go back to 2) while there are still dead nodes
 *
 */
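
/* A rough sketch of the message flow for one dead node, as implemented
 * below (B is the elected recovery master, A and C are other survivors):
 *
 *	B -> A,C : DLM_LOCK_REQUEST_MSG    (dlm_request_all_locks)
 *	A,C -> B : DLM_MIG_LOCKRES_MSG     (one per page of lockres data,
 *	                                    dlm_send_one_lockres)
 *	A,C -> B : DLM_RECO_DATA_DONE_MSG  (dlm_send_all_done_msg)
 *	B -> A,C : finalize message        (dlm_send_finalize_reco_message)
 */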
#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}
/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}

void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}
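
/* A caller-side sketch (hypothetical, for illustration only): a top-level
 * entry point would typically gate itself like
 *
 *	dlm_wait_for_recovery(dlm);
 *	... proceed with the request; a lockres still marked
 *	    DLM_LOCK_RES_RECOVERING must still be waited on individually ...
 *
 * i.e. per the comment above, this wait only covers the window before the
 * RECOVERY flag has been applied to the affected lock resources. */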
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	wake_up(&dlm->reco.event);
}
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES+1, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
		else
			dlm->reco.dead_node = bit;
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "recovery thread found node %u in the recovery map!\n",
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master */
		if (!dlm_pick_recovery_master(dlm)) {
			/* already notified everyone.  go. */
			dlm->reco.new_master = dlm->node_num;
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}
	mlog(0, "dlm=%s, new_master=%u, this node=%u, dead_node=%u\n",
	     dlm->name, dlm->reco.new_master,
	     dlm->node_num, dlm->reco.dead_node);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	mlog(0, "mastering recovery of %s:%u here(this=%u)!\n",
	     dlm->name, dlm->reco.dead_node, dlm->node_num);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		mlog(ML_ERROR, "error %d remastering locks for node %u, "
		     "retrying.\n", status, dlm->reco.dead_node);
	} else {
		/* success!  see if any other nodes need recovery */
		dlm_reset_recovery(dlm);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}
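
/* Per-node recovery state as driven by dlm_remaster_locks() below and the
 * DATA DONE handler (transitions inferred from this file only):
 *
 *	INIT -> REQUESTING        : lock info request about to be sent
 *	REQUESTING -> REQUESTED   : request acked, waiting for lockres data
 *	REQUESTED/RECEIVING -> DONE : DLM_RECO_DATA_DONE_MSG received
 *	any -> DEAD               : node died mid-recovery; the whole pass
 *	                            restarts (-EAGAIN) and the area is rebuilt
 *	DONE -> FINALIZE_SENT     : finalize message has gone out
 */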
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	struct list_head *iter;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	status = dlm_init_recovery_area(dlm, dead_node);
	if (status < 0)
		goto leave;

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry(iter, struct dlm_reco_node_data, list);
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "requesting lock info from node %u\n",
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		status = dlm_request_all_locks(dlm, ndata->node_num, dead_node);
		if (status < 0) {
			mlog_errno(status);
			if (dlm_is_host_down(status))
				ndata->state = DLM_RECO_NODE_DATA_DEAD;
			else {
				destroy = 1;
				goto leave;
			}
		}

		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				// start all over
				destroy = 1;
				status = -EAGAIN;
				goto leave;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
	}

	mlog(0, "done requesting all lock info\n");

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each(iter, &dlm->reco.node_data) {
			ndata = list_entry(iter, struct dlm_reco_node_data, list);

			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					spin_unlock(&dlm_reco_state_lock);
					// start all over
					destroy = 1;
					status = -EAGAIN;
					goto leave;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = ret;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					 kthread_should_stop(),
					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

leave:
	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	mlog_exit(status);
	return status;
}
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num = 0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;

		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);

		num++;
	}

	return 0;
}
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct list_head *iter, *iter2;
	struct dlm_reco_node_data *ndata;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_safe(iter, iter2, &tmplist) {
		ndata = list_entry(iter, struct dlm_reco_node_data, list);
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	enum dlm_status ret;

	mlog(0, "\n");

	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = DLM_NOLOCKMGR;
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, NULL);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog_errno(ret);

	// return from here, then
	// sleep until all received or error
	return ret;
}
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	schedule_work(&dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}
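
/* Note the handler/worker split used throughout this file: o2net message
 * handlers run in the network delivery path and should not block for long,
 * so the handler above only allocates, takes an extra dlm ref for the work
 * item (dropped again in dlm_dispatch_work), and defers the actual lockres
 * scan and network sends to dlm_request_all_locks_worker. */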
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	struct list_head *iter;
	int ret;
	u8 dead_node, reco_master;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	BUG_ON(dead_node != dlm->reco.dead_node);
	BUG_ON(reco_master != dlm->reco.new_master);

	mres = (struct dlm_migratable_lockres *)data;

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */
	list_for_each(iter, &resources) {
		res = list_entry(iter, struct dlm_lock_resource, recovering);
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0)
			mlog_errno(ret);
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
	if (ret < 0)
		mlog_errno(ret);

	free_page((unsigned long)data);
}
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	/* negative status is ignored by the caller */
	if (ret >= 0)
		ret = tmpret;
	return ret;
}
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct list_head *iter;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);
	BUG_ON(done->dead_node != dlm->reco.dead_node);

	spin_lock(&dlm_reco_state_lock);
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry(iter, struct dlm_reco_node_data, list);
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res;
	struct list_head *iter, *iter2;

	spin_lock(&dlm->spinlock);
	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry(iter, struct dlm_lock_resource, recovering);
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len))
			continue;
		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_del_init(&res->recovering);
			list_add_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_del_init(&res->recovering);
			list_add_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}
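
/* The helper below (and dlm_list_num_to_pointer() further down) walks the
 * granted, converting and blocked queues by plain pointer arithmetic from
 * &res->granted.  This only works because those three list heads are
 * declared consecutively, in that order, in struct dlm_lock_resource; if
 * that layout ever changes, both helpers break silently. */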
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog_errno(ret);
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	memset(mres, 0, PAGE_SIZE);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}
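
/* LVB handling in the helper below: only EX and PR holders can carry a
 * valid lock value block, so the lvb is copied into the message for those
 * modes only.  Since one mres aggregates many locks, once mres->lvb has
 * been populated any later lock in the same message must be a PR whose lvb
 * matches exactly; an EX holder alongside an existing lvb, or a mismatched
 * PR copy, indicates inconsistent lvbs and is treated as fatal. */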
/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		/* send our current lvb */
		if (ml->type == LKM_EXMODE ||
		    ml->type == LKM_PRMODE) {
			/* if it is already set, this had better be a PR
			 * and it has to match */
			if (mres->lvb[0] && (ml->type == LKM_EXMODE ||
			    memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
				mlog(ML_ERROR, "mismatched lvbs!\n");
				__dlm_print_one_lock_resource(lock->lockres);
				BUG();
			}
			memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		}
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue, *iter;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
		     "require more than one network packet to "
		     "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each(iter, queue) {
			lock = list_entry(iter, struct dlm_lock, list);

			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0) {
				// TODO
				mlog(ML_ERROR, "dlm_send_mig_lockres_msg "
				     "returned %d, TODO\n", ret);
				BUG();
			}
		}
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0) {
		// TODO
		mlog(ML_ERROR, "dlm_send_mig_lockres_msg returned %d, "
		     "TODO\n", ret);
		BUG();
	}
	return ret;
}
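
/* To tie the two halves together: dlm_send_mig_lockres_msg() compares the
 * running total_locks count against mres->total_locks and sets
 * DLM_MRES_ALL_DONE on whichever message carries the final lock — usually
 * the trailing flush above — so the receiver can tell the lockres is
 * complete even when it spanned several pages keyed by one mig_cookie. */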
/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
	     (mres->flags & DLM_MRES_RECOVERY) ?
	     "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL);
	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
				     mres->lockname_len,
				     mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
	} else {
		/* need to allocate, just like if it was
		 * mastered here normally */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);

		/* add an extra ref for just-allocated lockres
		 * otherwise the lockres will be purged immediately */
		dlm_lockres_get(res);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
		     "unknown owner.. will need to requery: "
		     "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		spin_unlock(&res->spinlock);
	}

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	schedule_work(&dlm->dispatched_work);

leave:
	dlm_put(dlm);
	if (ret < 0) {
		if (buf)
			kfree(buf);
		if (item)
			kfree(item);
	}

	mlog_exit(ret);
	return ret;
}
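
/* Reference accounting worth spelling out: on success the work item leaves
 * the handler holding three things that are released later — the res ref
 * from dlm_lookup_lockres() (or the one manufactured to match it on the
 * allocation path), the extra dlm ref from dlm_grab() (dropped in
 * dlm_dispatch_work), and the kmalloc'd copy of the message, which the
 * worker frees.  The second dlm_lockres_get() on the new-lockres path
 * deliberately leaves a surplus ref so the fresh, still-empty lockres is
 * not purged before its locks arrive. */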
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare. only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery failure: %d\n",
			     ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
			     "this node will take it.\n",
			     res->lockname.len, res->lockname.name);
		} else {
			mlog(0, "master needs to respond to sender "
			     "that node %u still owns %.*s\n",
			     real_master, res->lockname.len,
			     res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
	                   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	kfree(data);
	mlog_exit(ret);
}
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master.  the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering.  if so, then the new_master died and
	 * we need to remaster this lock.  if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			BUG();
			/* TODO: need to figure a way to restart this */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}
static int dlm_do_master_requery(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 u8 nodenum, u8 *real_master)
{
	int ret = -EINVAL;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	/* XXX: negative status not handled properly here. */
	if (ret < 0)
		mlog_errno(ret);
	else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
		     nodenum, *real_master);
		ret = 0;
	}
	return ret;
}
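
/* Note the in-band trick here: the remote dlm_master_requery_handler()
 * returns the master's node number (or DLM_LOCK_RES_OWNER_UNKNOWN) as its
 * handler return value, which o2net delivers back through the status
 * out-parameter above.  The "status" is the answer, not an error code,
 * hence the XXX: a genuine negative error from the remote side would be
 * indistinguishable and trips the BUG_ON instead. */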
/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(-ENOMEM);
				/* retry!? */
				BUG();
			}
		}
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
	return master;
}
static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	ret = &(res->granted);
	ret += list_num;
	return ret;
}

/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */
/*
 * NOTE about in-flight requests during migration:
 *
 * Before attempting the migrate, the master has marked the lockres as
 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
 * requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
 * updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue since the lock is always added
 * locally first.  This means that the new target node will never be sent
 * a lock that it doesn't already have on the list.
 * In total, this means that the local lock is correct and should not be
 * updated to match the one sent by the master.  Any messages sent back
 * from the master before the MIGRATING flag will bring the lock properly
 * up-to-date, and the change will be ordered properly for the waiter.
 * We will *not* attempt to modify the lock underneath the waiter.
 */
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i;
	struct list_head *iter;
	struct dlm_lock *lock = NULL;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i = 0; i < mres->num_locks; i++) {
		ml = &(mres->ml[i]);
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			spin_lock(&res->spinlock);
			list_for_each(iter, queue) {
				lock = list_entry(iter, struct dlm_lock, list);
				if (lock->ml.cookie != ml->cookie)
					lock = NULL;
				else
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				mlog(ML_ERROR, "could not find local lock "
				     "with cookie %"MLFu64"!\n",
				     ml->cookie);
				BUG();
			}
			BUG_ON(lock->ml.node != ml->node);

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_del_init(&lock->list);
			list_add_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		if (mres->lvb[0]) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died.  recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (res->lvb[0] && (ml->type == LKM_EXMODE ||
				    memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					mlog(ML_ERROR, "received bad lvb!\n");
					__dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *    1. order of locks on granted queue is
		 *       meaningless.
		 *    2. order of locks on converting queue is
		 *       LOST with the node death.  sorry charlie.
		 *    3. order of locks on the blocked queue is
		 *       also LOST.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
		 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes.
		 */
		spin_lock(&res->spinlock);
		dlm_lock_get(newlock);
		list_add_tail(&newlock->list, queue);
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");

leave:
	if (ret < 0) {
		mlog_errno(ret);
		if (newlock)
			dlm_lock_put(newlock);
	}

	mlog_exit(ret);
	return ret;
}
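
/* mark a lockres as being recovered and park it on the recovery list,
 * resolving anything the node death caught in flight: pending converts
 * are reverted, pending lock requests are pulled off the blocked list,
 * and pending unlocks/cancels are treated as if they had already
 * completed. */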
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue, *iter, *iter2;
	struct dlm_lock *lock;

	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering))
		list_del_init(&res->recovering);
	list_add_tail(&res->recovering, &dlm->reco.resources);

	/* find any pending locks and put them back on proper list */
	for (i = DLM_BLOCKED_LIST; i >= DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_safe(iter, iter2, queue) {
			lock = list_entry(iter, struct dlm_lock, list);
			dlm_lock_get(lock);
			if (lock->convert_pending) {
				/* move converting lock back to granted */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns.  ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast.  that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master. */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from granted list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
			dlm_lock_put(lock);
		}
	}
}

/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct list_head *iter, *iter2, *bucket;
	struct dlm_lock_resource *res;

	mlog_entry_void();

	assert_spin_locked(&dlm->spinlock);

	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry(iter, struct dlm_lock_resource, recovering);
		if (res->owner == dead_node) {
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_SIZE; i++) {
		bucket = &(dlm->resources[i]);
		list_for_each(iter, bucket) {
			res = list_entry(iter, struct dlm_lock_resource, list);
			if (res->state & DLM_LOCK_RES_RECOVERING) {
				if (res->owner == dead_node) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, but "
					     "clearing state anyway\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else if (res->owner == dlm->node_num) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, "
					     "owner is THIS node, clearing\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else
					continue;

				spin_lock(&res->spinlock);
				dlm_change_lockres_owner(dlm, res, new_master);
				res->state &= ~DLM_LOCK_RES_RECOVERING;
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
				wake_up(&res->wq);
			}
		}
	}
}
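
/* returns 1 if the lvb associated with this lock can no longer be
 * trusted: on a locally mastered lockres (local=0) an EX held by the
 * dead node means it may have died mid-update; on a secondary lockres
 * (local=1) a local lock below PR mode gives no guarantee that the
 * cached lvb is current. */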
static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}
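
/* scan the granted and converting queues and blank the lockres lvb
 * (and any affected lksb lvbs) if the dead node left it in an
 * untrustworthy state, as decided by dlm_lvb_needs_invalidation. */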
static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *iter, *queue;
	struct dlm_lock *lock;
	int blank_lvb = 0, local = 0;
	int i;
	u8 search_node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (res->owner == dlm->node_num)
		/* if this node owned the lockres, and if the dead node
		 * had an EX when he died, blank out the lvb */
		search_node = dead_node;
	else {
		/* if this is a secondary lockres, and we had no EX or PR
		 * locks granted, we can no longer trust the lvb */
		search_node = dlm->node_num;
		local = 1;  /* check local state for valid lvb */
	}

	for (i = DLM_GRANTED_LIST; i <= DLM_CONVERTING_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each(iter, queue) {
			lock = list_entry(iter, struct dlm_lock, list);
			if (lock->ml.node == search_node) {
				if (dlm_lvb_needs_invalidation(lock, local)) {
					/* zero the lksb lvb and lockres lvb */
					blank_lvb = 1;
					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
				}
			}
		}
	}

	if (blank_lvb) {
		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
		     res->lockname.len, res->lockname.name, dead_node);
		memset(res->lvb, 0, DLM_LVB_LEN);
	}
}
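
/* called on a locally mastered lockres to strip the dead node's locks
 * from the granted, converting and blocked queues, dropping the
 * reference each queue held on its locks. */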
static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *iter, *tmpiter;
	struct dlm_lock *lock;

	/* this node is the lockres master:
	 * 1) remove any stale locks for the dead node
	 * 2) if the dead node had an EX when he died, blank out the lvb
	 */
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* TODO: check pending_asts, pending_basts here */
	list_for_each_safe(iter, tmpiter, &res->granted) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}
	list_for_each_safe(iter, tmpiter, &res->converting) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}
	list_for_each_safe(iter, tmpiter, &res->blocked) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}

	/* do not kick thread yet */
	__dlm_dirty_lockres(dlm, res);
}

/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting. */
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct list_head *iter;
	struct dlm_lock_resource *res;
	int i;
	struct list_head *bucket;

	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_SIZE; i++) {
		bucket = &(dlm->resources[i]);
		list_for_each(iter, bucket) {
			res = list_entry(iter, struct dlm_lock_resource, list);
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len))
				continue;

			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node)
				dlm_move_lockres_to_recovery_list(dlm, res);
			else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			}
			spin_unlock(&res->spinlock);
		}
	}
}
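
/* core of node-down handling, called with dlm->spinlock held: drop the
 * node from the live and domain maps, clear any join state it held,
 * run local lockres cleanup, notify heartbeat listeners, and finally
 * set the node in the recovery map for the recovery thread to find. */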
static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);

	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}
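
/* heartbeat callbacks.  these take the domain spinlock and update the
 * node maps; dlm_grab keeps the domain from going away underneath the
 * callback. */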
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* notify any mles attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 1);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}
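
/* the asts for the recovery lock have nothing to do beyond logging;
 * what matters is simply which node wins the lock in
 * dlm_pick_recovery_master below. */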
static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}

static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}

static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}
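
/* race the other live nodes for the recovery lock in EX with NOQUEUE.
 * the winner becomes recovery master and announces the session via
 * dlm_send_begin_reco_message; a DLM_NOTQUEUED loser returns -EEXIST
 * and waits for reco.new_master to be set. */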
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
retry:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* I am master, send message to all nodes saying
		 * that I am beginning a recovery session */
		status = dlm_send_begin_reco_message(dlm,
						     dlm->reco.dead_node);

		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret != DLM_NORMAL) {
			/* this would really suck.  this could only happen
			 * if there was a network error during the unlock
			 * because of node death.  this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(0, "dlmunlock returned %d\n", ret);
		}

		if (status < 0) {
			mlog(0, "failed to send recovery message. "
			     "must retry with new node map.\n");
			goto retry;
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master.  wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM */
		status = -EEXIST;
	}

	return status;
}
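
/* broadcast DLM_BEGIN_RECO_MSG to every node in the domain map other
 * than the dead node and this node.  an error from any target aborts
 * the loop and is returned so the caller can retry with a fresh node
 * map. */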
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog_entry("%u\n", dead_node);

	mlog(0, "dead node is %u\n", dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}

		ret = -EINVAL;
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (ret < 0) {
			struct dlm_lock_resource *res;
			mlog_errno(ret);
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     "returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			break;
		}
	}

	return ret;
}
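
/* handler for DLM_BEGIN_RECO_MSG: record the announced recovery master
 * and dead node, mark the dead node down locally if heartbeat has not
 * yet noticed, and kick the recovery thread. */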
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	mlog(0, "node %u wants to recover node %u\n",
	     br->node_idx, br->dead_node);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		mlog(0, "new_master already set to %u!\n",
		     dlm->reco.new_master);
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(0, "dead_node already set to %u!\n",
		     dlm->reco.dead_node);
	}
	dlm->reco.new_master = br->node_idx;
	dlm->reco.dead_node = br->dead_node;
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(ML_ERROR, "recovery master %u sees %u as dead, but this "
		     "node has not yet. marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);
	dlm_put(dlm);
	return 0;
}
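
/* broadcast DLM_FINALIZE_RECO_MSG to all other domain nodes once the
 * dead node's lockres state has been handed out.  a target dying at
 * this point cannot affect the finished session, so that particular
 * error is swallowed. */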
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog(0, "finishing recovery for node %s:%u\n",
	     dlm->name, dlm->reco.dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0) {
			ret = status;
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
			}
		}
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	return ret;
}
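
/* handler for DLM_FINALIZE_RECO_MSG: verify the sender and dead node
 * match the locally recorded recovery state, hand the recovered
 * lockres over to the new master, then reset recovery state and kick
 * the recovery thread. */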
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	mlog(0, "node %u finalizing recovery of node %u\n",
	     fr->node_idx, fr->dead_node);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}

	dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);

	spin_unlock(&dlm->spinlock);

	dlm_reset_recovery(dlm);

	dlm_kick_recovery_thread(dlm);
	dlm_put(dlm);
	return 0;
}