/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmmaster.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdebug.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"
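
/*
 * A master list entry (mle) tracks the process of discovering or
 * becoming the master of a lock resource:
 *
 *   DLM_MLE_BLOCK:     another node is mastering this lock; wait for
 *                      its assert_master before proceeding.
 *   DLM_MLE_MASTER:    this node is actively trying to master the lock.
 *   DLM_MLE_MIGRATION: the lock is being migrated to a new master.
 *
 * The bitmaps record the state of the vote: maybe_map holds nodes that
 * might be (or become) master, vote_map the nodes still being polled,
 * response_map the nodes that have answered, and node_map a snapshot of
 * the domain's live nodes, kept current by heartbeat callbacks.
 */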
enum dlm_mle_type {
        DLM_MLE_BLOCK,
        DLM_MLE_MASTER,
        DLM_MLE_MIGRATION
};

struct dlm_lock_name
{
        u8 len;
        u8 name[DLM_LOCKID_NAME_MAX];
};

struct dlm_master_list_entry
{
        struct list_head list;
        struct list_head hb_events;
        struct dlm_ctxt *dlm;
        spinlock_t spinlock;
        wait_queue_head_t wq;
        atomic_t woken;
        struct kref mle_refs;
        int inuse;
        unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        u8 master;
        u8 new_master;
        enum dlm_mle_type type;
        struct o2hb_callback_func mle_hb_up;
        struct o2hb_callback_func mle_hb_down;
        union {
                struct dlm_lock_resource *res;
                struct dlm_lock_name name;
        } u;
};
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
                              struct dlm_master_list_entry *mle,
                              struct o2nm_node *node,
                              int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
                            struct dlm_master_list_entry *mle,
                            struct o2nm_node *node,
                            int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
                                unsigned int namelen, void *nodemap,
                                u32 flags);

static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
                                struct dlm_master_list_entry *mle,
                                const char *name,
                                unsigned int namelen)
{
        struct dlm_lock_resource *res;

        if (dlm != mle->dlm)
                return 0;

        if (mle->type == DLM_MLE_BLOCK ||
            mle->type == DLM_MLE_MIGRATION) {
                if (namelen != mle->u.name.len ||
                    memcmp(name, mle->u.name.name, namelen) != 0)
                        return 0;
        } else {
                res = mle->u.res;
                if (namelen != res->lockname.len ||
                    memcmp(res->lockname.name, name, namelen) != 0)
                        return 0;
        }
        return 1;
}
#if 0
/* Code here is included but defined out as it aids debugging */

#define dlm_print_nodemap(m) _dlm_print_nodemap(m,#m)
void _dlm_print_nodemap(unsigned long *map, const char *mapname)
{
        int i;
        printk("%s=[ ", mapname);
        for (i = 0; i < O2NM_MAX_NODES; i++)
                if (test_bit(i, map))
                        printk("%d ", i);
        printk("]");
}

void dlm_print_one_mle(struct dlm_master_list_entry *mle)
{
        int refs;
        char *type;
        char attached;
        u8 master;
        unsigned int namelen;
        const char *name;
        struct kref *k;
        unsigned long *maybe = mle->maybe_map,
                      *vote = mle->vote_map,
                      *resp = mle->response_map,
                      *node = mle->node_map;

        k = &mle->mle_refs;
        if (mle->type == DLM_MLE_BLOCK)
                type = "BLK";
        else if (mle->type == DLM_MLE_MASTER)
                type = "MAS";
        else
                type = "MIG";
        refs = atomic_read(&k->refcount);
        master = mle->master;
        attached = (list_empty(&mle->hb_events) ? 'N' : 'Y');

        if (mle->type != DLM_MLE_MASTER) {
                namelen = mle->u.name.len;
                name = mle->u.name.name;
        } else {
                namelen = mle->u.res->lockname.len;
                name = mle->u.res->lockname.name;
        }

        mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ",
             namelen, name, type, refs, master, mle->new_master, attached,
             mle->inuse);
        dlm_print_nodemap(maybe);
        printk(", ");
        dlm_print_nodemap(vote);
        printk(", ");
        dlm_print_nodemap(resp);
        printk(", ");
        dlm_print_nodemap(node);
        printk(", ");
        printk("\n");
}

static void dlm_dump_mles(struct dlm_ctxt *dlm)
{
        struct dlm_master_list_entry *mle;
        struct list_head *iter;

        mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
        spin_lock(&dlm->master_lock);
        list_for_each(iter, &dlm->master_list) {
                mle = list_entry(iter, struct dlm_master_list_entry, list);
                dlm_print_one_mle(mle);
        }
        spin_unlock(&dlm->master_lock);
}

int dlm_dump_all_mles(const char __user *data, unsigned int len)
{
        struct list_head *iter;
        struct dlm_ctxt *dlm;

        spin_lock(&dlm_domain_lock);
        list_for_each(iter, &dlm_domains) {
                dlm = list_entry(iter, struct dlm_ctxt, list);
                mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
                dlm_dump_mles(dlm);
        }
        spin_unlock(&dlm_domain_lock);
        return len;
}
EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
#endif  /*  0  */
static kmem_cache_t *dlm_mle_cache = NULL;

static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
                         enum dlm_mle_type type,
                         struct dlm_ctxt *dlm,
                         struct dlm_lock_resource *res,
                         const char *name,
                         unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
                        struct dlm_master_list_entry **mle,
                        char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to);

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_master_list_entry *mle,
                                     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res,
                                    struct dlm_master_list_entry *mle,
                                    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
                                 struct dlm_lock_resource *res,
                                 struct dlm_master_list_entry *mle,
                                 struct dlm_master_list_entry **oldmle,
                                 const char *name, unsigned int namelen,
                                 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
                                       struct dlm_lock_resource *res);
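
/*
 * dlm_is_host_down() maps an errno from a network send onto a yes/no
 * "did the remote node die?" answer.  Callers such as
 * dlm_do_master_request() below treat a nonzero return as evidence of
 * node death and fall back to the recovery path rather than retrying
 * the send.
 */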
int dlm_is_host_down(int errno)
{
        switch (errno) {
                case -EBADF:
                case -ECONNREFUSED:
                case -ENOTCONN:
                case -ECONNRESET:
                case -EPIPE:
                case -EHOSTDOWN:
                case -EHOSTUNREACH:
                case -ETIMEDOUT:
                case -ECONNABORTED:
                case -ENETDOWN:
                case -ENETUNREACH:
                case -ENETRESET:
                case -ESHUTDOWN:
                case -ENOPROTOOPT:
                case -EINVAL:   /* if returned from our tcp code,
                                   this means there is no socket */
                        return 1;
        }
        return 0;
}
/*
 * MASTER LIST FUNCTIONS
 */

/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
                                              struct dlm_master_list_entry *mle)
{
        assert_spin_locked(&dlm->spinlock);

        list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}

static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
                                              struct dlm_master_list_entry *mle)
{
        if (!list_empty(&mle->hb_events))
                list_del_init(&mle->hb_events);
}

static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
                                            struct dlm_master_list_entry *mle)
{
        spin_lock(&dlm->spinlock);
        __dlm_mle_detach_hb_events(dlm, mle);
        spin_unlock(&dlm->spinlock);
}
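
/*
 * Lock ordering note: dlm->spinlock is always taken before
 * dlm->master_lock throughout this file.  The "inuse" count below pins
 * an mle across a stretch where no locks are held, so that a racing
 * put (e.g. from the assert master handler) cannot free it out from
 * under the holder.
 */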
static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
        mle->inuse++;
        kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        spin_lock(&dlm->spinlock);
        spin_lock(&dlm->master_lock);
        mle->inuse--;
        __dlm_put_mle(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);
}

/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
        if (!atomic_read(&mle->mle_refs.refcount)) {
                /* this may or may not crash, but who cares.
                 * it's a BUG. */
                mlog(ML_ERROR, "bad mle: %p\n", mle);
                dlm_print_one_mle(mle);
                BUG();
        } else
                kref_put(&mle->mle_refs, dlm_mle_release);
}

/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        spin_lock(&dlm->spinlock);
        spin_lock(&dlm->master_lock);
        __dlm_put_mle(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);
}

static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
        kref_get(&mle->mle_refs);
}
static void dlm_init_mle(struct dlm_master_list_entry *mle,
                         enum dlm_mle_type type,
                         struct dlm_ctxt *dlm,
                         struct dlm_lock_resource *res,
                         const char *name,
                         unsigned int namelen)
{
        assert_spin_locked(&dlm->spinlock);

        mle->dlm = dlm;
        mle->type = type;
        INIT_LIST_HEAD(&mle->list);
        INIT_LIST_HEAD(&mle->hb_events);
        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
        spin_lock_init(&mle->spinlock);
        init_waitqueue_head(&mle->wq);
        atomic_set(&mle->woken, 0);
        kref_init(&mle->mle_refs);
        memset(mle->response_map, 0, sizeof(mle->response_map));
        mle->master = O2NM_MAX_NODES;
        mle->new_master = O2NM_MAX_NODES;
        mle->inuse = 0;

        if (mle->type == DLM_MLE_MASTER) {
                BUG_ON(!res);
                mle->u.res = res;
        } else if (mle->type == DLM_MLE_BLOCK) {
                BUG_ON(!name);
                memcpy(mle->u.name.name, name, namelen);
                mle->u.name.len = namelen;
        } else /* DLM_MLE_MIGRATION */ {
                BUG_ON(!name);
                memcpy(mle->u.name.name, name, namelen);
                mle->u.name.len = namelen;
        }

        /* copy off the node_map and register hb callbacks on our copy */
        memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
        memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
        clear_bit(dlm->node_num, mle->vote_map);
        clear_bit(dlm->node_num, mle->node_map);

        /* attach the mle to the domain node up/down events */
        __dlm_mle_attach_hb_events(dlm, mle);
}
/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
                        struct dlm_master_list_entry **mle,
                        char *name, unsigned int namelen)
{
        struct dlm_master_list_entry *tmpmle;
        struct list_head *iter;

        assert_spin_locked(&dlm->master_lock);

        list_for_each(iter, &dlm->master_list) {
                tmpmle = list_entry(iter, struct dlm_master_list_entry, list);
                if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
                        continue;
                dlm_get_mle(tmpmle);
                *mle = tmpmle;
                return 1;
        }
        return 0;
}
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
        struct dlm_master_list_entry *mle;
        struct list_head *iter;

        assert_spin_locked(&dlm->spinlock);

        list_for_each(iter, &dlm->mle_hb_events) {
                mle = list_entry(iter, struct dlm_master_list_entry,
                                 hb_events);
                if (node_up)
                        dlm_mle_node_up(dlm, mle, NULL, idx);
                else
                        dlm_mle_node_down(dlm, mle, NULL, idx);
        }
}

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
                              struct dlm_master_list_entry *mle,
                              struct o2nm_node *node, int idx)
{
        spin_lock(&mle->spinlock);

        if (!test_bit(idx, mle->node_map))
                mlog(0, "node %u already removed from nodemap!\n", idx);
        else
                clear_bit(idx, mle->node_map);

        spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
                            struct dlm_master_list_entry *mle,
                            struct o2nm_node *node, int idx)
{
        spin_lock(&mle->spinlock);

        if (test_bit(idx, mle->node_map))
                mlog(0, "node %u already in node map!\n", idx);
        else
                set_bit(idx, mle->node_map);

        spin_unlock(&mle->spinlock);
}
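
/*
 * mle cache lifecycle: dlm_init_mle_cache() and
 * dlm_destroy_mle_cache() are intended to be called once, at module
 * setup and teardown respectively.  Every mle in this file is
 * allocated from and freed back to this slab cache.
 */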
int dlm_init_mle_cache(void)
{
        dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
                                          sizeof(struct dlm_master_list_entry),
                                          0, SLAB_HWCACHE_ALIGN,
                                          NULL, NULL);
        if (dlm_mle_cache == NULL)
                return -ENOMEM;
        return 0;
}

void dlm_destroy_mle_cache(void)
{
        if (dlm_mle_cache)
                kmem_cache_destroy(dlm_mle_cache);
}

static void dlm_mle_release(struct kref *kref)
{
        struct dlm_master_list_entry *mle;
        struct dlm_ctxt *dlm;

        mlog_entry_void();

        mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
        dlm = mle->dlm;

        if (mle->type != DLM_MLE_MASTER) {
                mlog(0, "calling mle_release for %.*s, type %d\n",
                     mle->u.name.len, mle->u.name.name, mle->type);
        } else {
                mlog(0, "calling mle_release for %.*s, type %d\n",
                     mle->u.res->lockname.len,
                     mle->u.res->lockname.name, mle->type);
        }
        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);

        /* remove from list if not already */
        if (!list_empty(&mle->list))
                list_del_init(&mle->list);

        /* detach the mle from the domain node up/down events */
        __dlm_mle_detach_hb_events(dlm, mle);

        /* NOTE: kfree under spinlock here.
         * if this is bad, we can move this to a freelist. */
        kmem_cache_free(dlm_mle_cache, mle);
}
/*
 * LOCK RESOURCE FUNCTIONS
 */
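
/*
 * The dlm keeps three counters (local_resources, unknown_resources,
 * remote_resources) that classify every lock resource by who owns it.
 * dlm_set_lockres_owner() and dlm_change_lockres_owner() below keep
 * those counters consistent as ownership is assigned or transferred.
 */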
static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
                                  struct dlm_lock_resource *res,
                                  u8 owner)
{
        assert_spin_locked(&res->spinlock);

        mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);

        if (owner == dlm->node_num)
                atomic_inc(&dlm->local_resources);
        else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
                atomic_inc(&dlm->unknown_resources);
        else
                atomic_inc(&dlm->remote_resources);

        res->owner = owner;
}

void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
                              struct dlm_lock_resource *res, u8 owner)
{
        assert_spin_locked(&res->spinlock);

        if (owner == res->owner)
                return;

        if (res->owner == dlm->node_num)
                atomic_dec(&dlm->local_resources);
        else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
                atomic_dec(&dlm->unknown_resources);
        else
                atomic_dec(&dlm->remote_resources);

        dlm_set_lockres_owner(dlm, res, owner);
}

static void dlm_lockres_release(struct kref *kref)
{
        struct dlm_lock_resource *res;

        res = container_of(kref, struct dlm_lock_resource, refs);

        /* This should not happen -- all lockres' have a name
         * associated with them at init time. */
        BUG_ON(!res->lockname.name);

        mlog(0, "destroying lockres %.*s\n", res->lockname.len,
             res->lockname.name);

        if (!hlist_unhashed(&res->hash_node) ||
            !list_empty(&res->granted) ||
            !list_empty(&res->converting) ||
            !list_empty(&res->blocked) ||
            !list_empty(&res->dirty) ||
            !list_empty(&res->recovering) ||
            !list_empty(&res->purge)) {
                mlog(ML_ERROR,
                     "Going to BUG for resource %.*s."
                     "  We're on a list! [%c%c%c%c%c%c%c]\n",
                     res->lockname.len, res->lockname.name,
                     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
                     !list_empty(&res->granted) ? 'G' : ' ',
                     !list_empty(&res->converting) ? 'C' : ' ',
                     !list_empty(&res->blocked) ? 'B' : ' ',
                     !list_empty(&res->dirty) ? 'D' : ' ',
                     !list_empty(&res->recovering) ? 'R' : ' ',
                     !list_empty(&res->purge) ? 'P' : ' ');
                dlm_print_one_lock_resource(res);
        }

        /* By the time we're ready to blow this guy away, we shouldn't
         * be on any lists. */
        BUG_ON(!hlist_unhashed(&res->hash_node));
        BUG_ON(!list_empty(&res->granted));
        BUG_ON(!list_empty(&res->converting));
        BUG_ON(!list_empty(&res->blocked));
        BUG_ON(!list_empty(&res->dirty));
        BUG_ON(!list_empty(&res->recovering));
        BUG_ON(!list_empty(&res->purge));

        kfree(res->lockname.name);

        kfree(res);
}

void dlm_lockres_put(struct dlm_lock_resource *res)
{
        kref_put(&res->refs, dlm_lockres_release);
}
static void dlm_init_lockres(struct dlm_ctxt *dlm,
                             struct dlm_lock_resource *res,
                             const char *name, unsigned int namelen)
{
        char *qname;

        /* If we memset here, we lose our reference to the kmalloc'd
         * res->lockname.name, so be sure to init every field
         * correctly! */

        qname = (char *) res->lockname.name;
        memcpy(qname, name, namelen);

        res->lockname.len = namelen;
        res->lockname.hash = dlm_lockid_hash(name, namelen);

        init_waitqueue_head(&res->wq);
        spin_lock_init(&res->spinlock);
        INIT_HLIST_NODE(&res->hash_node);
        INIT_LIST_HEAD(&res->granted);
        INIT_LIST_HEAD(&res->converting);
        INIT_LIST_HEAD(&res->blocked);
        INIT_LIST_HEAD(&res->dirty);
        INIT_LIST_HEAD(&res->recovering);
        INIT_LIST_HEAD(&res->purge);

        atomic_set(&res->asts_reserved, 0);
        res->migration_pending = 0;

        kref_init(&res->refs);

        /* just for consistency */
        spin_lock(&res->spinlock);
        dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
        spin_unlock(&res->spinlock);

        res->state = DLM_LOCK_RES_IN_PROGRESS;

        res->last_used = 0;

        memset(res->lvb, 0, DLM_LVB_LEN);
}
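
/*
 * dlm_new_lockres() makes two separate allocations -- the resource
 * itself and a buffer for the name -- so the error path must free the
 * resource if the name allocation fails.  The name bytes are copied in
 * by dlm_init_lockres() above.
 */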
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
                                          const char *name,
                                          unsigned int namelen)
{
        struct dlm_lock_resource *res;

        res = kmalloc(sizeof(struct dlm_lock_resource), GFP_NOFS);
        if (!res)
                return NULL;

        res->lockname.name = kmalloc(namelen, GFP_NOFS);
        if (!res->lockname.name) {
                kfree(res);
                return NULL;
        }

        dlm_init_lockres(dlm, res, name, namelen);
        return res;
}
/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here.   need to wait around for that node
 * to assert_master (or die).
 *
 */
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
                                                 const char *lockid,
                                                 int flags)
{
        struct dlm_lock_resource *tmpres = NULL, *res = NULL;
        struct dlm_master_list_entry *mle = NULL;
        struct dlm_master_list_entry *alloc_mle = NULL;
        int blocked = 0;
        int ret, nodenum;
        struct dlm_node_iter iter;
        unsigned int namelen, hash;
        int tries = 0;
        int bit, wait_on_recovery = 0;

        BUG_ON(!lockid);

        namelen = strlen(lockid);
        hash = dlm_lockid_hash(lockid, namelen);

        mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
        spin_lock(&dlm->spinlock);
        tmpres = __dlm_lookup_lockres(dlm, lockid, namelen, hash);
        if (tmpres) {
                spin_unlock(&dlm->spinlock);
                mlog(0, "found in hash!\n");
                if (res)
                        dlm_lockres_put(res);
                res = tmpres;
                goto leave;
        }

        if (!res) {
                spin_unlock(&dlm->spinlock);
                mlog(0, "allocating a new resource\n");
                /* nothing found and we need to allocate one. */
                alloc_mle = (struct dlm_master_list_entry *)
                        kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
                if (!alloc_mle)
                        goto leave;
                res = dlm_new_lockres(dlm, lockid, namelen);
                if (!res)
                        goto leave;
                goto lookup;
        }

        mlog(0, "no lockres found, allocated our own: %p\n", res);

        if (flags & LKM_LOCAL) {
                /* caller knows it's safe to assume it's not mastered elsewhere
                 * DONE!  return right away */
                spin_lock(&res->spinlock);
                dlm_change_lockres_owner(dlm, res, dlm->node_num);
                __dlm_insert_lockres(dlm, res);
                spin_unlock(&res->spinlock);
                spin_unlock(&dlm->spinlock);
                /* lockres still marked IN_PROGRESS */
                goto wake_waiters;
        }

        /* check master list to see if another node has started mastering it */
        spin_lock(&dlm->master_lock);

        /* if we found a block, wait for lock to be mastered by another node */
        blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
        if (blocked) {
                if (mle->type == DLM_MLE_MASTER) {
                        mlog(ML_ERROR, "master entry for nonexistent lock!\n");
                        BUG();
                } else if (mle->type == DLM_MLE_MIGRATION) {
                        /* migration is in progress! */
                        /* the good news is that we now know the
                         * "current" master (mle->master). */

                        spin_unlock(&dlm->master_lock);
                        assert_spin_locked(&dlm->spinlock);

                        /* set the lockres owner and hash it */
                        spin_lock(&res->spinlock);
                        dlm_set_lockres_owner(dlm, res, mle->master);
                        __dlm_insert_lockres(dlm, res);
                        spin_unlock(&res->spinlock);
                        spin_unlock(&dlm->spinlock);

                        /* master is known, detach */
                        dlm_mle_detach_hb_events(dlm, mle);
                        dlm_put_mle(mle);
                        mle = NULL;
                        goto wake_waiters;
                }
        } else {
                /* go ahead and try to master lock on this node */
                mle = alloc_mle;
                /* make sure this does not get freed below */
                alloc_mle = NULL;
                dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
                set_bit(dlm->node_num, mle->maybe_map);
                list_add(&mle->list, &dlm->master_list);

                /* still holding the dlm spinlock, check the recovery map
                 * to see if there are any nodes that still need to be
                 * considered.  these will not appear in the mle nodemap
                 * but they might own this lockres.  wait on them. */
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
                        mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
                             "recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                }
        }

        /* at this point there is either a DLM_MLE_BLOCK or a
         * DLM_MLE_MASTER on the master list, so it's safe to add the
         * lockres to the hashtable.  anyone who finds the lock will
         * still have to wait on the IN_PROGRESS. */

        /* finally add the lockres to its hash bucket */
        __dlm_insert_lockres(dlm, res);

        /* get an extra ref on the mle in case this is a BLOCK
         * if so, the creator of the BLOCK may try to put the last
         * ref at this time in the assert master handler, so we
         * need an extra one to keep from a bad ptr deref. */
        dlm_get_mle_inuse(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);

redo_request:
        while (wait_on_recovery) {
                /* any cluster changes that occurred after dropping the
                 * dlm spinlock would be detectable by a change on the mle,
                 * so we only need to clear out the recovery map once. */
                if (dlm_is_recovery_lock(lockid, namelen)) {
                        mlog(ML_NOTICE, "%s: recovery map is not empty, but "
                             "must master $RECOVERY lock now\n", dlm->name);
                        if (!dlm_pre_master_reco_lockres(dlm, res))
                                wait_on_recovery = 0;
                        else {
                                mlog(0, "%s: waiting 500ms for heartbeat state "
                                     "change\n", dlm->name);
                                msleep(500);
                        }
                        continue;
                }

                dlm_kick_recovery_thread(dlm);
                msleep(1000);
                dlm_wait_for_recovery(dlm);

                spin_lock(&dlm->spinlock);
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
                        mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
                             "recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                } else
                        wait_on_recovery = 0;
                spin_unlock(&dlm->spinlock);

                if (wait_on_recovery)
                        dlm_wait_for_node_recovery(dlm, bit, 10000);
        }

        /* must wait for lock to be mastered elsewhere */
        if (blocked)
                goto wait;

        ret = -EINVAL;
        dlm_node_iter_init(mle->vote_map, &iter);
        while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
                ret = dlm_do_master_request(mle, nodenum);
                if (ret < 0)
                        mlog_errno(ret);
                if (mle->master != O2NM_MAX_NODES) {
                        /* found a master ! */
                        if (mle->master <= nodenum)
                                break;
                        /* if our master request has not reached the master
                         * yet, keep going until it does.  this is how the
                         * master will know that asserts are needed back to
                         * the lower nodes. */
                        mlog(0, "%s:%.*s: requests only up to %u but master "
                             "is %u, keep going\n", dlm->name, namelen,
                             lockid, nodenum, mle->master);
                }
        }

wait:
        /* keep going until the response map includes all nodes */
        ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
        if (ret < 0) {
                wait_on_recovery = 1;
                mlog(0, "%s:%.*s: node map changed, redo the "
                     "master request now, blocked=%d\n",
                     dlm->name, res->lockname.len,
                     res->lockname.name, blocked);
                if (++tries > 20) {
                        mlog(ML_ERROR, "%s:%.*s: spinning on "
                             "dlm_wait_for_lock_mastery, blocked=%d\n",
                             dlm->name, res->lockname.len,
                             res->lockname.name, blocked);
                        dlm_print_one_lock_resource(res);
                        /* dlm_print_one_mle(mle); */
                        tries = 0;
                }
                goto redo_request;
        }

        mlog(0, "lockres mastered by %u\n", res->owner);
        /* make sure we never continue without this */
        BUG_ON(res->owner == O2NM_MAX_NODES);

        /* master is known, detach if not already detached */
        dlm_mle_detach_hb_events(dlm, mle);
        dlm_put_mle(mle);
        /* put the extra ref */
        dlm_put_mle_inuse(mle);

wake_waiters:
        spin_lock(&res->spinlock);
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);

leave:
        /* need to free the unused mle */
        if (alloc_mle)
                kmem_cache_free(dlm_mle_cache, alloc_mle);

        return res;
}
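
/*
 * Overview of the wait loop below: dlm_wait_for_lock_mastery() drives
 * a simple state machine.  If another node has asserted ownership, we
 * are done.  Otherwise, once every node in vote_map has responded and
 * this node holds the lowest number set in maybe_map, we assert
 * mastery ourselves; if the node map changed mid-vote, mastery is
 * restarted via dlm_restart_lock_mastery().  Until one of those
 * happens, the caller sleeps up to DLM_MASTERY_TIMEOUT_MS per
 * iteration and rechecks.
 */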
#define DLM_MASTERY_TIMEOUT_MS   5000

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_master_list_entry *mle,
                                     int *blocked)
{
        u8 m;
        int ret, bit;
        int map_changed, voting_done;
        int assert, sleep;

recheck:
        ret = 0;
        assert = 0;

        /* check if another node has already become the owner */
        spin_lock(&res->spinlock);
        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
                mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
                     res->lockname.len, res->lockname.name, res->owner);
                spin_unlock(&res->spinlock);
                /* this will cause the master to re-assert across
                 * the whole cluster, freeing up mles */
                if (res->owner != dlm->node_num) {
                        ret = dlm_do_master_request(mle, res->owner);
                        if (ret < 0) {
                                /* give recovery a chance to run */
                                mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
                                msleep(500);
                                goto recheck;
                        }
                }
                ret = 0;
                goto leave;
        }
        spin_unlock(&res->spinlock);

        spin_lock(&mle->spinlock);
        m = mle->master;
        map_changed = (memcmp(mle->vote_map, mle->node_map,
                              sizeof(mle->vote_map)) != 0);
        voting_done = (memcmp(mle->vote_map, mle->response_map,
                              sizeof(mle->vote_map)) == 0);

        /* restart if we hit any errors */
        if (map_changed) {
                int b;
                mlog(0, "%s: %.*s: node map changed, restarting\n",
                     dlm->name, res->lockname.len, res->lockname.name);
                ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
                b = (mle->type == DLM_MLE_BLOCK);
                if ((*blocked && !b) || (!*blocked && b)) {
                        mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
                             dlm->name, res->lockname.len, res->lockname.name,
                             *blocked, b);
                        *blocked = b;
                }
                spin_unlock(&mle->spinlock);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto leave;
                }
                mlog(0, "%s:%.*s: restart lock mastery succeeded, "
                     "rechecking now\n", dlm->name, res->lockname.len,
                     res->lockname.name);
                goto recheck;
        } else {
                if (!voting_done) {
                        mlog(0, "map not changed and voting not done "
                             "for %s:%.*s\n", dlm->name, res->lockname.len,
                             res->lockname.name);
                }
        }

        if (m != O2NM_MAX_NODES) {
                /* another node has done an assert!
                 * all done! */
                sleep = 0;
        } else {
                sleep = 1;
                /* have all nodes responded? */
                if (voting_done && !*blocked) {
                        bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
                        if (dlm->node_num <= bit) {
                                /* my node number is lowest.
                                 * now tell other nodes that I am
                                 * mastering this. */
                                mle->master = dlm->node_num;
                                assert = 1;
                                sleep = 0;
                        }
                        /* if voting is done, but we have not received
                         * an assert master yet, we must sleep */
                }
        }

        spin_unlock(&mle->spinlock);

        /* sleep if we haven't finished voting yet */
        if (sleep) {
                unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

                /*
                if (atomic_read(&mle->mle_refs.refcount) < 2)
                        mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
                        atomic_read(&mle->mle_refs.refcount),
                        res->lockname.len, res->lockname.name);
                */
                atomic_set(&mle->woken, 0);
                (void)wait_event_timeout(mle->wq,
                                         (atomic_read(&mle->woken) == 1),
                                         timeo);
                if (res->owner == O2NM_MAX_NODES) {
                        mlog(0, "waiting again\n");
                        goto recheck;
                }
                mlog(0, "done waiting, master is %u\n", res->owner);
                ret = 0;
                goto leave;
        }

        ret = 0;   /* done */
        if (assert) {
                m = dlm->node_num;
                mlog(0, "about to master %.*s here, this=%u\n",
                     res->lockname.len, res->lockname.name, m);
                ret = dlm_do_assert_master(dlm, res->lockname.name,
                                           res->lockname.len, mle->vote_map, 0);
                if (ret) {
                        /* This is a failure in the network path,
                         * not in the response to the assert_master
                         * (any nonzero response is a BUG on this node).
                         * Most likely a socket just got disconnected
                         * due to node death. */
                        mlog_errno(ret);
                }
                /* no longer need to restart lock mastery.
                 * all living nodes have been contacted. */
                ret = 0;
        }

        /* set the lockres owner */
        spin_lock(&res->spinlock);
        dlm_change_lockres_owner(dlm, res, m);
        spin_unlock(&res->spinlock);

leave:
        return ret;
}
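
/*
 * The bitmap diff iterator below walks the XOR of two node bitmaps and
 * reports each node whose state changed between the two snapshots: a
 * bit set in the original map but not the current one means the node
 * went down; the reverse means it came up.
 */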
struct dlm_bitmap_diff_iter
{
        int curnode;
        unsigned long *orig_bm;
        unsigned long *cur_bm;
        unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
        NODE_DOWN = -1,
        NODE_NO_CHANGE = 0,
        NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
                                      unsigned long *orig_bm,
                                      unsigned long *cur_bm)
{
        unsigned long p1, p2;
        int i;

        iter->curnode = -1;
        iter->orig_bm = orig_bm;
        iter->cur_bm = cur_bm;

        for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
                p1 = *(iter->orig_bm + i);
                p2 = *(iter->cur_bm + i);
                iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
        }
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
                                     enum dlm_node_state_change *state)
{
        int bit;

        if (iter->curnode >= O2NM_MAX_NODES)
                return -ENOENT;

        bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
                            iter->curnode+1);
        if (bit >= O2NM_MAX_NODES) {
                iter->curnode = O2NM_MAX_NODES;
                return -ENOENT;
        }

        /* if it was there in the original then this node died */
        if (test_bit(bit, iter->orig_bm))
                *state = NODE_DOWN;
        else
                *state = NODE_UP;

        iter->curnode = bit;
        return bit;
}
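
/*
 * dlm_restart_lock_mastery() is called (with mle->spinlock held) when
 * the node map changed mid-vote.  For a node that came up it simply
 * re-polls that node; for a node that died it wipes the vote state and
 * starts over, and a blocked mle may even be promoted to
 * DLM_MLE_MASTER if the node it was blocking on was the one that died.
 * It returns -EAGAIN whenever anything changed, so the caller redoes
 * the master requests.
 */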
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res,
                                    struct dlm_master_list_entry *mle,
                                    int blocked)
{
        struct dlm_bitmap_diff_iter bdi;
        enum dlm_node_state_change sc;
        int node;
        int ret = 0;

        mlog(0, "something happened such that the "
             "master process may need to be restarted!\n");

        assert_spin_locked(&mle->spinlock);

        dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
        node = dlm_bitmap_diff_iter_next(&bdi, &sc);
        while (node >= 0) {
                if (sc == NODE_UP) {
                        /* a node came up.  clear any old vote from
                         * the response map and set it in the vote map
                         * then restart the mastery. */
                        mlog(ML_NOTICE, "node %d up while restarting\n", node);

                        /* redo the master request, but only for the new node */
                        mlog(0, "sending request to new node\n");
                        clear_bit(node, mle->response_map);
                        set_bit(node, mle->vote_map);
                } else {
                        mlog(ML_ERROR, "node down! %d\n", node);
                        if (blocked) {
                                int lowest = find_next_bit(mle->maybe_map,
                                                           O2NM_MAX_NODES, 0);

                                /* act like it was never there */
                                clear_bit(node, mle->maybe_map);

                                if (node == lowest) {
                                        mlog(0, "expected master %u died"
                                             " while this node was blocked "
                                             "waiting on it!\n", node);
                                        lowest = find_next_bit(mle->maybe_map,
                                                               O2NM_MAX_NODES,
                                                               lowest+1);
                                        if (lowest < O2NM_MAX_NODES) {
                                                mlog(0, "%s:%.*s:still "
                                                     "blocked. waiting on %u "
                                                     "now\n", dlm->name,
                                                     res->lockname.len,
                                                     res->lockname.name,
                                                     lowest);
                                        } else {
                                                /* mle is an MLE_BLOCK, but
                                                 * there is now nothing left to
                                                 * block on.  we need to return
                                                 * all the way back out and try
                                                 * again with an MLE_MASTER.
                                                 * dlm_do_local_recovery_cleanup
                                                 * has already run, so the mle
                                                 * refcount is ok */
                                                mlog(0, "%s:%.*s: no "
                                                     "longer blocking. try to "
                                                     "master this here\n",
                                                     dlm->name,
                                                     res->lockname.len,
                                                     res->lockname.name);
                                                mle->type = DLM_MLE_MASTER;
                                                mle->u.res = res;
                                        }
                                }
                        }

                        /* now blank out everything, as if we had never
                         * contacted anyone */
                        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
                        memset(mle->response_map, 0, sizeof(mle->response_map));
                        /* reset the vote_map to the current node_map */
                        memcpy(mle->vote_map, mle->node_map,
                               sizeof(mle->node_map));
                        /* put myself into the maybe map */
                        if (mle->type != DLM_MLE_BLOCK)
                                set_bit(dlm->node_num, mle->maybe_map);
                }
                ret = -EAGAIN;
                node = dlm_bitmap_diff_iter_next(&bdi, &sc);
        }
        return ret;
}
/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 *
 */
static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to)
{
        struct dlm_ctxt *dlm = mle->dlm;
        struct dlm_master_request request;
        int ret, response = 0, resend;

        memset(&request, 0, sizeof(request));
        request.node_idx = dlm->node_num;

        BUG_ON(mle->type == DLM_MLE_MIGRATION);

        if (mle->type != DLM_MLE_MASTER) {
                request.namelen = mle->u.name.len;
                memcpy(request.name, mle->u.name.name, request.namelen);
        } else {
                request.namelen = mle->u.res->lockname.len;
                memcpy(request.name, mle->u.res->lockname.name,
                       request.namelen);
        }

again:
        ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
                                 sizeof(request), to, &response);
        if (ret < 0)  {
                if (ret == -ESRCH) {
                        /* should never happen */
                        mlog(ML_ERROR, "TCP stack not ready!\n");
                        BUG();
                } else if (ret == -EINVAL) {
                        mlog(ML_ERROR, "bad args passed to o2net!\n");
                        BUG();
                } else if (ret == -ENOMEM) {
                        mlog(ML_ERROR, "out of memory while trying to send "
                             "network message!  retrying\n");
                        /* this is totally crude */
                        msleep(50);
                        goto again;
                } else if (!dlm_is_host_down(ret)) {
                        /* not a network error.  bad. */
                        mlog_errno(ret);
                        mlog(ML_ERROR, "unhandled error!");
                        BUG();
                }
                /* all other errors should be network errors,
                 * and likely indicate node death */
                mlog(ML_ERROR, "link to %d went down!\n", to);
                goto out;
        }

        ret = 0;
        resend = 0;
        spin_lock(&mle->spinlock);
        switch (response) {
                case DLM_MASTER_RESP_YES:
                        set_bit(to, mle->response_map);
                        mlog(0, "node %u is the master, response=YES\n", to);
                        mle->master = to;
                        break;
                case DLM_MASTER_RESP_NO:
                        mlog(0, "node %u not master, response=NO\n", to);
                        set_bit(to, mle->response_map);
                        break;
                case DLM_MASTER_RESP_MAYBE:
                        mlog(0, "node %u not master, response=MAYBE\n", to);
                        set_bit(to, mle->response_map);
                        set_bit(to, mle->maybe_map);
                        break;
                case DLM_MASTER_RESP_ERROR:
                        mlog(0, "node %u hit an error, resending\n", to);
                        resend = 1;
                        response = 0;
                        break;
                default:
                        mlog(ML_ERROR, "bad response! %u\n", response);
                        BUG();
        }
        spin_unlock(&mle->spinlock);
        if (resend) {
                /* this is also totally crude */
                msleep(50);
                goto again;
        }

out:
        return ret;
}
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
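/*
 * Response semantics, as consumed by dlm_do_master_request() above:
 *   DLM_MASTER_RESP_YES   -- this node is (or will be) the master
 *   DLM_MASTER_RESP_NO    -- this node is not the master, or is merely
 *                            blocked waiting on another master
 *   DLM_MASTER_RESP_MAYBE -- this node is also trying to master the lock
 *   DLM_MASTER_RESP_ERROR -- transient failure (e.g. the resource is
 *                            being recovered); the requester resends
 */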
  1194. int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
  1195. {
  1196. u8 response = DLM_MASTER_RESP_MAYBE;
  1197. struct dlm_ctxt *dlm = data;
  1198. struct dlm_lock_resource *res = NULL;
	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
	char *name;
	unsigned int namelen, hash;
	int found, ret;
	int set_maybe;
	int dispatch_assert = 0;

	if (!dlm_grab(dlm))
		return DLM_MASTER_RESP_NO;

	if (!dlm_domain_fully_joined(dlm)) {
		response = DLM_MASTER_RESP_NO;
		goto send_response;
	}

	name = request->name;
	namelen = request->namelen;
	hash = dlm_lockid_hash(name, namelen);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		response = DLM_IVBUFLEN;
		goto send_response;
	}

way_up_top:
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_unlock(&dlm->spinlock);

		/* take care of the easy cases up front */
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			spin_unlock(&res->spinlock);
			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
			     "being recovered\n");
			response = DLM_MASTER_RESP_ERROR;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		if (res->owner == dlm->node_num) {
			spin_unlock(&res->spinlock);
			// mlog(0, "this node is the master\n");
			response = DLM_MASTER_RESP_YES;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);

			/* this node is the owner.
			 * there is some extra work that needs to
			 * happen now.  the requesting node has
			 * caused all nodes up to this one to
			 * create mles.  this node now needs to
			 * go back and clean those up. */
			dispatch_assert = 1;
			goto send_response;
		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
			spin_unlock(&res->spinlock);
			// mlog(0, "node %u is the master\n", res->owner);
			response = DLM_MASTER_RESP_NO;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		/* ok, there is no owner.  either this node is
		 * being blocked, or it is actively trying to
		 * master this lock. */
		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
			mlog(ML_ERROR, "lock with no owner should be "
			     "in-progress!\n");
			BUG();
		}

		// mlog(0, "lockres is in progress...\n");
		spin_lock(&dlm->master_lock);
		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
		if (!found) {
			mlog(ML_ERROR, "no mle found for this lock!\n");
			BUG();
		}
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->type == DLM_MLE_BLOCK) {
			// mlog(0, "this node is waiting for "
			// "lockres to be mastered\n");
			response = DLM_MASTER_RESP_NO;
		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "node %u is master, but trying to migrate to "
			     "node %u.\n", tmpmle->master, tmpmle->new_master);
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				mlog(ML_ERROR, "no owner on lockres, but this "
				     "node is trying to migrate it to %u?!\n",
				     tmpmle->new_master);
				BUG();
			} else {
				/* the real master can respond on its own */
				response = DLM_MASTER_RESP_NO;
			}
		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			set_maybe = 0;
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				/* this node will be the owner.
				 * go back and clean the mles on any
				 * other nodes */
				dispatch_assert = 1;
			} else
				response = DLM_MASTER_RESP_NO;
		} else {
			// mlog(0, "this node is attempting to "
			// "master lockres\n");
			response = DLM_MASTER_RESP_MAYBE;
		}
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);

		spin_unlock(&dlm->master_lock);
		spin_unlock(&res->spinlock);

		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
		if (mle)
			kmem_cache_free(dlm_mle_cache, mle);
		goto send_response;
	}

	/*
	 * lockres doesn't exist on this node
	 * if there is an MLE_BLOCK, return NO
	 * if there is an MLE_MASTER, return MAYBE
	 * otherwise, add an MLE_BLOCK, return NO
	 */
	spin_lock(&dlm->master_lock);
	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
	if (!found) {
		/* this lockid has never been seen on this node yet */
		// mlog(0, "no mle found\n");
		if (!mle) {
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);
			mle = (struct dlm_master_list_entry *)
				kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
			if (!mle) {
				response = DLM_MASTER_RESP_ERROR;
				mlog_errno(-ENOMEM);
				goto send_response;
			}
			goto way_up_top;
		}

		// mlog(0, "this is second time thru, already allocated, "
		// "add the block.\n");
		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
		set_bit(request->node_idx, mle->maybe_map);
		list_add(&mle->list, &dlm->master_list);
		response = DLM_MASTER_RESP_NO;
	} else {
		// mlog(0, "mle was found\n");
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->master == dlm->node_num) {
			mlog(ML_ERROR, "no lockres, but an mle with this node "
			     "as master!\n");
			BUG();
		}
		if (tmpmle->type == DLM_MLE_BLOCK)
			response = DLM_MASTER_RESP_NO;
		else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "migration mle was found (%u->%u)\n",
			     tmpmle->master, tmpmle->new_master);
			/* real master can respond on its own */
			response = DLM_MASTER_RESP_NO;
		} else
			response = DLM_MASTER_RESP_MAYBE;
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (found) {
		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
	}
send_response:

	if (dispatch_assert) {
		if (response != DLM_MASTER_RESP_YES)
			mlog(ML_ERROR, "invalid response %d\n", response);
		if (!res) {
			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
			BUG();
		}
		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
		     dlm->node_num, res->lockname.len, res->lockname.name);
		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
						 DLM_ASSERT_MASTER_MLE_CLEANUP);
		if (ret < 0) {
			mlog(ML_ERROR, "failed to dispatch assert master work\n");
			response = DLM_MASTER_RESP_ERROR;
		}
	}

	dlm_put(dlm);
	return response;
}
/*
 * DLM_ASSERT_MASTER_MSG
 */

/*
 * NOTE: this can be used for debugging
 * can periodically run all locks owned by this node
 * and re-assert across the cluster...
 */
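/*
 * Send DLM_ASSERT_MASTER_MSG for @lockname to every node set in @nodemap.
 * A peer answering with a positive EAGAIN status means it created mles on
 * other nodes and wants a re-assert, so the whole pass is retried until no
 * peer asks for one.  (A summary of the loop below.)
 */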
static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
				unsigned int namelen, void *nodemap,
				u32 flags)
{
	struct dlm_assert_master assert;
	int to, tmpret;
	struct dlm_node_iter iter;
	int ret = 0;
	int reassert;

	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
again:
	reassert = 0;

	/* note that if this nodemap is empty, it returns 0 */
	dlm_node_iter_init(nodemap, &iter);
	while ((to = dlm_node_iter_next(&iter)) >= 0) {
		int r = 0;
		struct dlm_master_list_entry *mle = NULL;

		mlog(0, "sending assert master to %d (%.*s)\n", to,
		     namelen, lockname);
		memset(&assert, 0, sizeof(assert));
		assert.node_idx = dlm->node_num;
		assert.namelen = namelen;
		memcpy(assert.name, lockname, namelen);
		assert.flags = cpu_to_be32(flags);

		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
					    &assert, sizeof(assert), to, &r);
		if (tmpret < 0) {
			mlog(0, "assert_master returned %d!\n", tmpret);
			if (!dlm_is_host_down(tmpret)) {
				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
				BUG();
			}
			/* a node died.  finish out the rest of the nodes. */
			mlog(0, "link to %d went down!\n", to);
			/* any nonzero status return will do */
			ret = tmpret;
		} else if (r < 0) {
			/* ok, something is horribly messed up.  kill thyself. */
			mlog(ML_ERROR, "during assert master of %.*s to %u, "
			     "got %d.\n", namelen, lockname, to, r);
			spin_lock(&dlm->spinlock);
			spin_lock(&dlm->master_lock);
			if (dlm_find_mle(dlm, &mle, (char *)lockname,
					 namelen)) {
				dlm_print_one_mle(mle);
				__dlm_put_mle(mle);
			}
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);
			BUG();
		} else if (r == EAGAIN) {
			mlog(0, "%.*s: node %u created mles on other "
			     "nodes and requests a re-assert\n",
			     namelen, lockname, to);
			reassert = 1;
		}
	}

	if (reassert)
		goto again;

	return ret;
}
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
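/*
 * Return convention, as read from the body below: 0 on success, positive
 * EAGAIN to ask the asserting master to re-assert, and a negative errno
 * only for a bogus assert -- a negative status shoots down the sender.
 */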
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen, hash;
	u32 flags;
	int master_request = 0;
	int ret = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = assert->name;
	namelen = assert->namelen;
	hash = dlm_lockid_hash(name, namelen);
	flags = be32_to_cpu(assert->flags);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!");
		goto done;
	}

	spin_lock(&dlm->spinlock);

	if (flags)
		mlog(0, "assert_master with flags: %u\n", flags);

	/* find the MLE */
	spin_lock(&dlm->master_lock);
	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
		/* not an error, could be master just re-asserting */
		mlog(0, "just got an assert_master from %u, but no "
		     "MLE for it! (%.*s)\n", assert->node_idx,
		     namelen, name);
	} else {
		int bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES) {
			/* not necessarily an error, though less likely.
			 * could be master just re-asserting. */
			mlog(0, "no bits set in the maybe_map, but %u "
			     "is asserting! (%.*s)\n", assert->node_idx,
			     namelen, name);
		} else if (bit != assert->node_idx) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "master %u was found, %u should "
				     "back off\n", assert->node_idx, bit);
			} else {
				/* with the fix for bug 569, a higher node
				 * number winning the mastery will respond
				 * YES to mastery requests, but this node
				 * had no way of knowing.  let it pass. */
				mlog(0, "%u is the lowest node, "
				     "%u is asserting. (%.*s)  %u must "
				     "have begun after %u won.\n", bit,
				     assert->node_idx, namelen, name, bit,
				     assert->node_idx);
			}
		}
		if (mle->type == DLM_MLE_MIGRATION) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "%s:%.*s: got cleanup assert"
				     " from %u for migration\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
				mlog(0, "%s:%.*s: got unrelated assert"
				     " from %u for migration, ignoring\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
				__dlm_put_mle(mle);
				spin_unlock(&dlm->master_lock);
				spin_unlock(&dlm->spinlock);
				goto done;
			}
		}
	}
	spin_unlock(&dlm->master_lock);

	/* ok everything checks out with the MLE
	 * now check to see if there is a lockres */
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			mlog(ML_ERROR, "%u asserting but %.*s is "
			     "RECOVERING!\n", assert->node_idx, namelen, name);
			goto kill;
		}
		if (!mle) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
			    res->owner != assert->node_idx) {
				mlog(ML_ERROR, "assert_master from "
				     "%u, but current owner is "
				     "%u! (%.*s)\n",
				     assert->node_idx, res->owner,
				     namelen, name);
				goto kill;
			}
		} else if (mle->type != DLM_MLE_MIGRATION) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
				/* owner is just re-asserting */
				if (res->owner == assert->node_idx) {
					mlog(0, "owner %u re-asserting on "
					     "lock %.*s\n", assert->node_idx,
					     namelen, name);
					goto ok;
				}
				mlog(ML_ERROR, "got assert_master from "
				     "node %u, but %u is the owner! "
				     "(%.*s)\n", assert->node_idx,
				     res->owner, namelen, name);
				goto kill;
			}
			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
				mlog(ML_ERROR, "got assert from %u, but lock "
				     "with no owner should be "
				     "in-progress! (%.*s)\n",
				     assert->node_idx,
				     namelen, name);
				goto kill;
			}
		} else /* mle->type == DLM_MLE_MIGRATION */ {
			/* should only be getting an assert from new master */
			if (assert->node_idx != mle->new_master) {
				mlog(ML_ERROR, "got assert from %u, but "
				     "new master is %u, and old master "
				     "was %u (%.*s)\n",
				     assert->node_idx, mle->new_master,
				     mle->master, namelen, name);
				goto kill;
			}
		}
ok:
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	// mlog(0, "woo!  got an assert_master from node %u!\n",
	// assert->node_idx);
	if (mle) {
		int extra_ref = 0;
		int nn = -1;
		int rr, err = 0;

		spin_lock(&mle->spinlock);
		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
			extra_ref = 1;
		else {
			/* MASTER mle: if any bits set in the response map
			 * then the calling node needs to re-assert to clear
			 * up nodes that this node contacted */
			while ((nn = find_next_bit(mle->response_map, O2NM_MAX_NODES,
						   nn+1)) < O2NM_MAX_NODES) {
				if (nn != dlm->node_num && nn != assert->node_idx)
					master_request = 1;
			}
		}
		mle->master = assert->node_idx;
		atomic_set(&mle->woken, 1);
		wake_up(&mle->wq);
		spin_unlock(&mle->spinlock);

		if (res) {
			spin_lock(&res->spinlock);
			if (mle->type == DLM_MLE_MIGRATION) {
				mlog(0, "finishing off migration of lockres %.*s, "
				     "from %u to %u\n",
				     res->lockname.len, res->lockname.name,
				     dlm->node_num, mle->new_master);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				dlm_change_lockres_owner(dlm, res, mle->new_master);
				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
			} else {
				dlm_change_lockres_owner(dlm, res, mle->master);
			}
			spin_unlock(&res->spinlock);
		}

		/* master is known, detach if not already detached.
		 * ensures that only one assert_master call will happen
		 * on this mle. */
		spin_lock(&dlm->spinlock);
		spin_lock(&dlm->master_lock);
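		/*
		 * Sanity-check the mle refcount before tearing it down.
		 * A reading of the checks below: one ref is held by being
		 * on the master list, dlm_get_mle_inuse() adds one while
		 * inuse > 0, and BLOCK/MIGRATION mles carry one extra ref
		 * from the request message that created them.
		 */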
		rr = atomic_read(&mle->mle_refs.refcount);
		if (mle->inuse > 0) {
			if (extra_ref && rr < 3)
				err = 1;
			else if (!extra_ref && rr < 2)
				err = 1;
		} else {
			if (extra_ref && rr < 2)
				err = 1;
			else if (!extra_ref && rr < 1)
				err = 1;
		}
		if (err) {
			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
			     "that will mess up this node, refs=%d, extra=%d, "
			     "inuse=%d\n", dlm->name, namelen, name,
			     assert->node_idx, rr, extra_ref, mle->inuse);
			dlm_print_one_mle(mle);
		}
		list_del_init(&mle->list);
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);
		if (extra_ref) {
			/* the assert master message now balances the extra
			 * ref given by the master / migration request message.
			 * if this is the last put, it will be removed
			 * from the list. */
			__dlm_put_mle(mle);
		}
		spin_unlock(&dlm->master_lock);
		spin_unlock(&dlm->spinlock);
	} else if (res) {
		if (res->owner != assert->node_idx) {
			mlog(0, "assert_master from %u, but current "
			     "owner is %u (%.*s), no mle\n", assert->node_idx,
			     res->owner, namelen, name);
		}
	}

done:
	ret = 0;
	if (res)
		dlm_lockres_put(res);
	dlm_put(dlm);
	if (master_request) {
		mlog(0, "need to tell master to reassert\n");
		ret = EAGAIN;  // positive. negative would shoot down the node.
	}
	return ret;

kill:
	/* kill the caller! */
	mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
	     "and killing the other node now!  This node is OK and can continue.\n");
	__dlm_print_one_lock_resource(res);
	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
	dlm_lockres_put(res);
	dlm_put(dlm);
	return -EINVAL;
}
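/*
 * Queue an assert-master broadcast for the dlm worker thread.  The extra
 * dlm ref taken below is presumably dropped by the generic work dispatcher
 * once the item has run (the dispatcher is not part of this section).
 */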
int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher, u8 request_from, u32 flags)
{
	struct dlm_work_item *item;
	item = kcalloc(1, sizeof(*item), GFP_NOFS);
	if (!item)
		return -ENOMEM;

	/* queue up work for dlm_assert_master_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
	item->u.am.lockres = res; /* already have a ref */
	/* can optionally ignore node numbers higher than this node */
	item->u.am.ignore_higher = ignore_higher;
	item->u.am.request_from = request_from;
	item->u.am.flags = flags;

	if (ignore_higher)
		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
		     res->lockname.name);

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	schedule_work(&dlm->dispatched_work);
	return 0;
}
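/*
 * Worker body for dlm_dispatch_assert_master(): snapshot the domain map,
 * drop ourselves (and, for mle cleanup, the requester plus every
 * higher-numbered node), then broadcast our mastery of the lockres.
 */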
static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm = data;
	int ret = 0;
	struct dlm_lock_resource *res;
	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int ignore_higher;
	int bit;
	u8 request_from;
	u32 flags;

	dlm = item->dlm;
	res = item->u.am.lockres;
	ignore_higher = item->u.am.ignore_higher;
	request_from = item->u.am.request_from;
	flags = item->u.am.flags;

	spin_lock(&dlm->spinlock);
	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
	spin_unlock(&dlm->spinlock);

	clear_bit(dlm->node_num, nodemap);
	if (ignore_higher) {
		/* if this is just to clear up mles for nodes below
		 * this node, do not send the message to the original
		 * caller or any node number higher than this */
		clear_bit(request_from, nodemap);
		bit = dlm->node_num;
		while (1) {
			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
					    bit+1);
			if (bit >= O2NM_MAX_NODES)
				break;
			clear_bit(bit, nodemap);
		}
	}

	/*
	 * If we're migrating this lock to someone else, we are no
	 * longer allowed to assert our own mastery.  OTOH, we need to
	 * prevent migration from starting while we're still asserting
	 * our dominance.  The reserved ast delays migration.
	 */
	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "Someone asked us to assert mastery, but we're "
		     "in the middle of migration.  Skipping assert, "
		     "the new master will handle that.\n");
		spin_unlock(&res->spinlock);
		goto put;
	} else
		__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	mlog(0, "worker about to master %.*s here, this=%u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num);
	ret = dlm_do_assert_master(dlm, res->lockname.name,
				   res->lockname.len,
				   nodemap, flags);
	if (ret < 0) {
		/* no need to restart, we are done */
		if (!dlm_is_host_down(ret))
			mlog_errno(ret);
	}

	/* Ok, we've asserted ourselves.  Let's let migration start. */
	dlm_lockres_release_ast(dlm, res);

put:
	dlm_lockres_put(res);

	mlog(0, "finished with dlm_assert_master_worker\n");
}
/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
 * We cannot wait for node recovery to complete to begin mastering this
 * lockres because this lockres is used to kick off recovery! ;-)
 * So, do a pre-check on all living nodes to see if any of those nodes
 * think that $RECOVERY is currently mastered by a dead node.  If so,
 * we wait a short time to allow that node to get notified by its own
 * heartbeat stack, then check again.  All $RECOVERY lock resources
 * mastered by dead nodes are purged when the heartbeat callback is
 * fired, so we can know for sure that it is safe to continue once
 * the node returns a live node or no node. */
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;
	u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, &master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
			ret = 0;
		}

		if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			/* check to see if this master is in the recovery map */
			spin_lock(&dlm->spinlock);
			if (test_bit(master, dlm->recovery_map)) {
				mlog(ML_NOTICE, "%s: node %u has not seen "
				     "node %u go down yet, and thinks the "
				     "dead node is mastering the recovery "
				     "lock.  must wait.\n", dlm->name,
				     nodenum, master);
				ret = -EAGAIN;
			}
			spin_unlock(&dlm->spinlock);
			mlog(0, "%s: reco lock master is %u\n", dlm->name,
			     master);
			break;
		}
	}
	return ret;
}
/*
 * DLM_MIGRATE_LOCKRES
 */
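/*
 * Migration, step by step as implemented below: verify this node masters
 * the lockres and holds no local locks on it, preallocate the migratable
 * lockres buffer and an mle, pick a live target, add a MIGRATION mle,
 * set the MIGRATING flag once all asts are flushed, ship the lock state
 * to the target, then wait for the target's assert_master before handing
 * over ownership and freeing the nonlocal dlm_lock structures.
 */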
int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			u8 target)
{
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *oldmle = NULL;
	struct dlm_migratable_lockres *mres = NULL;
	int ret = -EINVAL;
	const char *name;
	unsigned int namelen;
	int mle_added = 0;
	struct list_head *queue, *iter;
	int i;
	struct dlm_lock *lock;
	int empty = 1;

	if (!dlm_grab(dlm))
		return -EINVAL;

	name = res->lockname.name;
	namelen = res->lockname.len;

	mlog(0, "migrating %.*s to %u\n", namelen, name, target);

	/*
	 * ensure this lockres is a proper candidate for migration
	 */
	spin_lock(&res->spinlock);
	if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "cannot migrate lockres with unknown owner!\n");
		spin_unlock(&res->spinlock);
		goto leave;
	}
	if (res->owner != dlm->node_num) {
		mlog(0, "cannot migrate lockres this node doesn't own!\n");
		spin_unlock(&res->spinlock);
		goto leave;
	}
	mlog(0, "checking queues...\n");
	queue = &res->granted;
	for (i = 0; i < 3; i++) {
		list_for_each(iter, queue) {
			lock = list_entry(iter, struct dlm_lock, list);
			empty = 0;
			if (lock->ml.node == dlm->node_num) {
				mlog(0, "found a lock owned by this node "
				     "still on the %s queue!  will not "
				     "migrate this lockres\n",
				     i == 0 ? "granted" :
				     (i == 1 ? "converting" : "blocked"));
				spin_unlock(&res->spinlock);
				ret = -ENOTEMPTY;
				goto leave;
			}
		}
		queue++;
	}
	mlog(0, "all locks on this lockres are nonlocal.  continuing\n");
	spin_unlock(&res->spinlock);

	/* no work to do */
	if (empty) {
		mlog(0, "no locks were found on this lockres! done!\n");
		ret = 0;
		goto leave;
	}

	/*
	 * preallocate up front
	 * if this fails, abort
	 */
	ret = -ENOMEM;
	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
	if (!mres) {
		mlog_errno(ret);
		goto leave;
	}

	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
								GFP_NOFS);
	if (!mle) {
		mlog_errno(ret);
		goto leave;
	}
	ret = 0;

	/*
	 * find a node to migrate the lockres to
	 */
	mlog(0, "picking a migration node\n");
	spin_lock(&dlm->spinlock);
	/* pick a new node */
	if (!test_bit(target, dlm->domain_map) ||
	    target >= O2NM_MAX_NODES) {
		target = dlm_pick_migration_target(dlm, res);
	}
	mlog(0, "node %u chosen for migration\n", target);

	if (target >= O2NM_MAX_NODES ||
	    !test_bit(target, dlm->domain_map)) {
		/* target chosen is not alive */
		ret = -EINVAL;
	}

	if (ret) {
		spin_unlock(&dlm->spinlock);
		goto fail;
	}

	mlog(0, "continuing with target = %u\n", target);

	/*
	 * clear any existing master requests and
	 * add the migration mle to the list
	 */
	spin_lock(&dlm->master_lock);
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
				    namelen, target, dlm->node_num);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (ret == -EEXIST) {
		mlog(0, "another process is already migrating it\n");
		goto fail;
	}
	mle_added = 1;

	/*
	 * set the MIGRATING flag and flush asts
	 * if we fail after this we need to re-dirty the lockres
	 */
	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
		     "the target went down.\n", res->lockname.len,
		     res->lockname.name, target);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
		ret = -EINVAL;
	}

fail:
	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (ret < 0) {
		if (mle_added) {
			dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
		} else if (mle) {
			kmem_cache_free(dlm_mle_cache, mle);
		}
		goto leave;
	}

	/*
	 * at this point, we have a migration target, an mle
	 * in the master list, and the MIGRATING flag set on
	 * the lockres
	 */

	/* get an extra reference on the mle.
	 * otherwise the assert_master from the new
	 * master will destroy this.
	 * also, make sure that all callers of dlm_get_mle
	 * take both dlm->spinlock and dlm->master_lock */
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	/* notify new node and send all lock state */
	/* call send_one_lockres with migration flag.
	 * this serves as notice to the target node that a
	 * migration is starting. */
	ret = dlm_send_one_lockres(dlm, res, mres, target,
				   DLM_MRES_MIGRATION);

	if (ret < 0) {
		mlog(0, "migration to node %u failed with %d\n",
		     target, ret);
		/* migration failed, detach and clean up mle */
		dlm_mle_detach_hb_events(dlm, mle);
		dlm_put_mle(mle);
		dlm_put_mle_inuse(mle);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
		goto leave;
	}

	/* at this point, the target sends a message to all nodes,
	 * (using dlm_do_migrate_request).  this node is skipped since
	 * we had to put an mle in the list to begin the process.  this
	 * node now waits for target to do an assert master.  this node
	 * will be the last one notified, ensuring that the migration
	 * is complete everywhere.  if the target dies while this is
	 * going on, some nodes could potentially see the target as the
	 * master, so it is important that my recovery finds the migration
	 * mle and sets the master to UNKNOWN. */

	/* wait for new node to assert master */
	while (1) {
		ret = wait_event_interruptible_timeout(mle->wq,
					(atomic_read(&mle->woken) == 1),
					msecs_to_jiffies(5000));

		if (ret >= 0) {
			if (atomic_read(&mle->woken) == 1 ||
			    res->owner == target)
				break;

			mlog(0, "timed out during migration\n");
			/* avoid hang during shutdown when migrating lockres
			 * to a node which also goes down */
			if (dlm_is_node_dead(dlm, target)) {
				mlog(0, "%s:%.*s: expected migration "
				     "target %u is no longer up, restarting\n",
				     dlm->name, res->lockname.len,
				     res->lockname.name, target);
				ret = -ERESTARTSYS;
			}
		}
		if (ret == -ERESTARTSYS) {
			/* migration failed, detach and clean up mle */
			dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			dlm_put_mle_inuse(mle);
			spin_lock(&res->spinlock);
			res->state &= ~DLM_LOCK_RES_MIGRATING;
			spin_unlock(&res->spinlock);
			goto leave;
		}
		/* TODO: if node died: stop, clean up, return error */
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, target);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	dlm_remove_nonlocal_locks(dlm, res);
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle_inuse(mle);
	ret = 0;

	dlm_lockres_calc_usage(dlm, res);

leave:
	/* re-dirty the lockres if we failed */
	if (ret < 0)
		dlm_kick_thread(dlm, res);

	/* TODO: cleanup */
	if (mres)
		free_page((unsigned long)mres);

	dlm_put(dlm);

	mlog(0, "returning %d\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(dlm_migrate_lockres);
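/*
 * Nonzero once no basts remain queued or pending for this lock; presumably
 * used as a wait_event() condition by callers outside this section.
 */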
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	int ret;
	spin_lock(&dlm->ast_lock);
	spin_lock(&lock->spinlock);
	ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
	spin_unlock(&lock->spinlock);
	spin_unlock(&dlm->ast_lock);
	return ret;
}
static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     u8 mig_target)
{
	int can_proceed;
	spin_lock(&res->spinlock);
	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
	spin_unlock(&res->spinlock);

	/* target has died, so make the caller break out of the
	 * wait_event, but caller must recheck the domain_map */
	spin_lock(&dlm->spinlock);
	if (!test_bit(mig_target, dlm->domain_map))
		can_proceed = 1;
	spin_unlock(&dlm->spinlock);
	return can_proceed;
}

int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	int ret;
	spin_lock(&res->spinlock);
	ret = !!(res->state & DLM_LOCK_RES_DIRTY);
	spin_unlock(&res->spinlock);
	return ret;
}
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target)
{
	int ret = 0;
	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num,
	     target);
	/* need to set MIGRATING flag on lockres.  this is done by
	 * ensuring that all asts have been flushed for this lockres. */
	spin_lock(&res->spinlock);
	BUG_ON(res->migration_pending);
	res->migration_pending = 1;
	/* strategy is to reserve an extra ast then release
	 * it below, letting the release do all of the work */
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* now flush all the pending asts.. hang out for a bit */
	dlm_kick_thread(dlm, res);
	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
	dlm_lockres_release_ast(dlm, res);

	mlog(0, "about to wait on migration_wq, dirty=%s\n",
	     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
	/* if the extra ref we just put was the final one, this
	 * will pass thru immediately.  otherwise, we need to wait
	 * for the last ast to finish. */
again:
	ret = wait_event_interruptible_timeout(dlm->migration_wq,
		   dlm_migration_can_proceed(dlm, res, target),
		   msecs_to_jiffies(1000));
	if (ret < 0) {
		mlog(0, "woken again: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	} else {
		mlog(0, "all is well: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	}
	if (!dlm_migration_can_proceed(dlm, res, target)) {
		mlog(0, "trying again...\n");
		goto again;
	}

	/* did the target go down or die? */
	spin_lock(&dlm->spinlock);
	if (!test_bit(target, dlm->domain_map)) {
		mlog(ML_ERROR, "aha. migration target %u just went down\n",
		     target);
		ret = -EHOSTDOWN;
	}
	spin_unlock(&dlm->spinlock);

	/*
	 * at this point:
	 *
	 *   o the DLM_LOCK_RES_MIGRATING flag is set
	 *   o there are no pending asts on this lockres
	 *   o all processes trying to reserve an ast on this
	 *     lockres must wait for the MIGRATING flag to clear
	 */
	return ret;
}
/* last step in the migration process.
 * original master calls this to free all of the dlm_lock
 * structures that used to be for other nodes. */
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res)
{
	struct list_head *iter, *iter2;
	struct list_head *queue = &res->granted;
	int i;
	struct dlm_lock *lock;

	assert_spin_locked(&res->spinlock);

	BUG_ON(res->owner == dlm->node_num);

	for (i = 0; i < 3; i++) {
		list_for_each_safe(iter, iter2, queue) {
			lock = list_entry(iter, struct dlm_lock, list);
			if (lock->ml.node != dlm->node_num) {
				mlog(0, "putting lock for node %u\n",
				     lock->ml.node);
				/* be extra careful */
				BUG_ON(!list_empty(&lock->ast_list));
				BUG_ON(!list_empty(&lock->bast_list));
				BUG_ON(lock->ast_pending);
				BUG_ON(lock->bast_pending);
				list_del_init(&lock->list);
				dlm_lock_put(lock);
			}
		}
		queue++;
	}
}
/* for now this is not too intelligent.  we will
 * need stats to make this do the right thing.
 * this just finds the first lock on one of the
 * queues and uses that node as the target. */
static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue = &res->granted;
	struct list_head *iter;
	struct dlm_lock *lock;
	int nodenum;

	assert_spin_locked(&dlm->spinlock);

	spin_lock(&res->spinlock);
	for (i = 0; i < 3; i++) {
		list_for_each(iter, queue) {
			/* up to the caller to make sure this node
			 * is alive */
			lock = list_entry(iter, struct dlm_lock, list);
			if (lock->ml.node != dlm->node_num) {
				spin_unlock(&res->spinlock);
				return lock->ml.node;
			}
		}
		queue++;
	}
	spin_unlock(&res->spinlock);
	mlog(0, "have not found a suitable target yet! checking domain map\n");

	/* ok now we're getting desperate.  pick anyone alive. */
	nodenum = -1;
	while (1) {
		nodenum = find_next_bit(dlm->domain_map,
					O2NM_MAX_NODES, nodenum+1);
		mlog(0, "found %d in domain map\n", nodenum);
		if (nodenum >= O2NM_MAX_NODES)
			break;
		if (nodenum != dlm->node_num) {
			mlog(0, "picking %d\n", nodenum);
			return nodenum;
		}
	}

	mlog(0, "giving up.  no master to migrate to\n");
	return DLM_LOCK_RES_OWNER_UNKNOWN;
}
/* this is called by the new master once all lockres
 * data has been received */
static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  u8 master, u8 new_master,
				  struct dlm_node_iter *iter)
{
	struct dlm_migrate_request migrate;
	int ret, status = 0;
	int nodenum;

	memset(&migrate, 0, sizeof(migrate));
	migrate.namelen = res->lockname.len;
	memcpy(migrate.name, res->lockname.name, migrate.namelen);
	migrate.new_master = new_master;
	migrate.master = master;

	ret = 0;

	/* send message to all nodes, except the master and myself */
	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
		if (nodenum == master ||
		    nodenum == new_master)
			continue;

		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
					 &migrate, sizeof(migrate), nodenum,
					 &status);
		if (ret < 0)
			mlog_errno(ret);
		else if (status < 0) {
			mlog(0, "migrate request (node %u) returned %d!\n",
			     nodenum, status);
			ret = status;
		}
	}

	if (ret < 0)
		mlog_errno(ret);

	mlog(0, "returning ret=%d\n", ret);
	return ret;
}
/* if there is an existing mle for this lockres, we now know who the master is.
 * (the one who sent us *this* message) we can clear it up right away.
 * since the process that put the mle on the list still has a reference to it,
 * we can unhash it now, set the master and wake the process.  as a result,
 * we will have no mle in the list to start with.  now we can add an mle for
 * the migration and this should be the only one found for those scanning the
 * list. */
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
	const char *name;
	unsigned int namelen, hash;
	int ret = 0;

	if (!dlm_grab(dlm))
		return -EINVAL;

	name = migrate->name;
	namelen = migrate->namelen;
	hash = dlm_lockid_hash(name, namelen);

	/* preallocate.. if this fails, abort */
	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
								GFP_NOFS);

	if (!mle) {
		ret = -ENOMEM;
		goto leave;
	}

	/* check for pre-existing lock */
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	spin_lock(&dlm->master_lock);

	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			/* if all is working ok, this can only mean that we got
			 * a migrate request from a node that we now see as
			 * dead.  what can we do here?  drop it to the floor? */
			spin_unlock(&res->spinlock);
			mlog(ML_ERROR, "Got a migrate request, but the "
			     "lockres is marked as recovering!");
			kmem_cache_free(dlm_mle_cache, mle);
			ret = -EINVAL; /* need a better solution */
			goto unlock;
		}
		res->state |= DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
	}

	/* ignore status.  only nonzero status would BUG. */
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
				    name, namelen,
				    migrate->new_master,
				    migrate->master);

unlock:
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (res)
		dlm_lockres_put(res);
leave:
	dlm_put(dlm);
	return ret;
}
/* must be holding dlm->spinlock and dlm->master_lock
 * when adding a migration mle, we can clear any other mles
 * in the master list because we know with certainty that
 * the master is "master".  so we remove any old mle from
 * the list after setting its master field, and then add
 * the new migration mle.  this way we can hold with the rule
 * of having only one mle for a given lock name at all times. */
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master)
{
	int found;
	int ret = 0;

	*oldmle = NULL;

	mlog_entry_void();

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* caller is responsible for any ref taken here on oldmle */
	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
	if (found) {
		struct dlm_master_list_entry *tmp = *oldmle;
		spin_lock(&tmp->spinlock);
		if (tmp->type == DLM_MLE_MIGRATION) {
			if (master == dlm->node_num) {
				/* ah another process raced me to it */
				mlog(0, "tried to migrate %.*s, but some "
				     "process beat me to it\n",
				     namelen, name);
				ret = -EEXIST;
			} else {
				/* bad.  2 NODES are trying to migrate! */
				mlog(ML_ERROR, "migration error  mle: "
				     "master=%u new_master=%u // request: "
				     "master=%u new_master=%u // "
				     "lockres=%.*s\n",
				     tmp->master, tmp->new_master,
				     master, new_master,
				     namelen, name);
				BUG();
			}
		} else {
			/* this is essentially what assert_master does */
			tmp->master = master;
			atomic_set(&tmp->woken, 1);
			wake_up(&tmp->wq);
			/* remove it from the list so that only one
			 * mle will be found.  detach the *old* mle
			 * from heartbeat; the new one is not yet
			 * initialized here. */
			list_del_init(&tmp->list);
			__dlm_mle_detach_hb_events(dlm, tmp);
		}
		spin_unlock(&tmp->spinlock);
	}

	/* now add a migration mle to the tail of the list */
	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
	mle->new_master = new_master;
	mle->master = master;
	/* do this for consistency with other mle types */
	set_bit(new_master, mle->maybe_map);
	list_add(&mle->list, &dlm->master_list);

	return ret;
}
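/*
 * Called with dlm->spinlock held when @dead_node leaves the domain: walk
 * the master list and resolve every mle that was waiting on that node.
 */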
void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct list_head *iter, *iter2;
	struct dlm_master_list_entry *mle;
	struct dlm_lock_resource *res;
	unsigned int hash;

	mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
top:
	assert_spin_locked(&dlm->spinlock);

	/* clean the master list */
	spin_lock(&dlm->master_lock);
	list_for_each_safe(iter, iter2, &dlm->master_list) {
		mle = list_entry(iter, struct dlm_master_list_entry, list);

		BUG_ON(mle->type != DLM_MLE_BLOCK &&
		       mle->type != DLM_MLE_MASTER &&
		       mle->type != DLM_MLE_MIGRATION);

		/* MASTER mles are initiated locally.  the waiting
		 * process will notice the node map change
		 * shortly.  let that happen as normal. */
		if (mle->type == DLM_MLE_MASTER)
			continue;

		/* BLOCK mles are initiated by other nodes.
		 * need to clean up if the dead node would have
		 * been the master. */
		if (mle->type == DLM_MLE_BLOCK) {
			int bit;

			spin_lock(&mle->spinlock);
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (bit != dead_node) {
				mlog(0, "mle found, but dead node %u would "
				     "not have been master\n", dead_node);
				spin_unlock(&mle->spinlock);
			} else {
				/* must drop the refcount by one since the
				 * assert_master will never arrive.  this
				 * may result in the mle being unlinked and
				 * freed, but there may still be a process
				 * waiting in the dlmlock path which is fine. */
				mlog(0, "node %u was expected master\n",
				     dead_node);
				atomic_set(&mle->woken, 1);
				spin_unlock(&mle->spinlock);
				wake_up(&mle->wq);
				/* do not need events any longer, so detach
				 * from heartbeat */
				__dlm_mle_detach_hb_events(dlm, mle);
				__dlm_put_mle(mle);
			}
			continue;
		}

		/* everything else is a MIGRATION mle */

		/* the rule for MIGRATION mles is that the master
		 * becomes UNKNOWN if *either* the original or
		 * the new master dies.  all UNKNOWN lockreses
		 * are sent to whichever node becomes the recovery
		 * master.  the new master is responsible for
		 * determining if there is still a master for
		 * this lockres, or if he needs to take over
		 * mastery.  either way, this node should expect
		 * another message to resolve this. */
		if (mle->master != dead_node &&
		    mle->new_master != dead_node)
			continue;

		/* if we have reached this point, this mle needs to
		 * be removed from the list and freed. */

		/* remove from the list early.  NOTE: unlinking
		 * list_head while in list_for_each_safe */
		__dlm_mle_detach_hb_events(dlm, mle);
		spin_lock(&mle->spinlock);
		list_del_init(&mle->list);
		atomic_set(&mle->woken, 1);
		spin_unlock(&mle->spinlock);
		wake_up(&mle->wq);

		mlog(0, "%s: node %u died during migration from "
		     "%u to %u!\n", dlm->name, dead_node,
		     mle->master, mle->new_master);

		/* if there is a lockres associated with this
		 * mle, find it and set its owner to UNKNOWN */
		hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
		res = __dlm_lookup_lockres(dlm, mle->u.name.name,
					   mle->u.name.len, hash);
		if (res) {
			/* unfortunately if we hit this rare case, our
			 * lock ordering is messed.  we need to drop
			 * the master lock so that we can take the
			 * lockres lock, meaning that we will have to
			 * restart from the head of list. */
			spin_unlock(&dlm->master_lock);

			/* move lockres onto recovery list */
			spin_lock(&res->spinlock);
			dlm_set_lockres_owner(dlm, res,
					      DLM_LOCK_RES_OWNER_UNKNOWN);
			dlm_move_lockres_to_recovery_list(dlm, res);
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);

			/* about to get rid of mle, detach from heartbeat */
			__dlm_mle_detach_hb_events(dlm, mle);

			/* dump the mle */
			spin_lock(&dlm->master_lock);
			__dlm_put_mle(mle);
			spin_unlock(&dlm->master_lock);

			/* restart */
			goto top;
		}

		/* this may be the last reference */
		__dlm_put_mle(mle);
	}
	spin_unlock(&dlm->master_lock);
}
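/*
 * Runs on the new master once all lock state has arrived: broadcast the
 * migrate request, assert mastery to the remaining nodes and then to the
 * old master, and finally take local ownership of the lockres.
 */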
int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 u8 old_master)
{
	struct dlm_node_iter iter;
	int ret = 0;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	clear_bit(old_master, iter.node_map);
	clear_bit(dlm->node_num, iter.node_map);
	spin_unlock(&dlm->spinlock);

	mlog(0, "now time to do a migrate request to other nodes\n");
	ret = dlm_do_migrate_request(dlm, res, old_master,
				     dlm->node_num, &iter);
	if (ret < 0) {
		mlog_errno(ret);
		goto leave;
	}

	mlog(0, "doing assert master of %.*s to all except the original node\n",
	     res->lockname.len, res->lockname.name);
	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	ret = dlm_do_assert_master(dlm, res->lockname.name,
				   res->lockname.len, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		/* no longer need to retry.  all living nodes contacted. */
		mlog_errno(ret);
		ret = 0;
	}

	memset(iter.node_map, 0, sizeof(iter.node_map));
	set_bit(old_master, iter.node_map);
	mlog(0, "doing assert master of %.*s back to %u\n",
	     res->lockname.len, res->lockname.name, old_master);
	ret = dlm_do_assert_master(dlm, res->lockname.name,
				   res->lockname.len, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		mlog(0, "assert master to original master failed "
		     "with %d.\n", ret);
		/* the only nonzero status here would be because of
		 * a dead original node.  we're done. */
		ret = 0;
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, dlm->node_num);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	/* re-dirty it on the new master */
	dlm_kick_thread(dlm, res);
	wake_up(&res->wq);
leave:
	return ret;
}
/*
 * LOCKRES AST REFCOUNT
 * this is integral to migration
 */
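/*
 * Typical reserve/release pairing, as in dlm_mark_lockres_migrating()
 * above (a sketch, not an additional API):
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_lockres_reserve_ast(res);
 *	spin_unlock(&res->spinlock);
 *	...deliver the ast/bast...
 *	dlm_lockres_release_ast(dlm, res);
 */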
/* for future intent to call an ast, reserve one ahead of time.
 * this should be called only after waiting on the lockres
 * with dlm_wait_on_lockres, and while still holding the
 * spinlock after the call. */
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		__dlm_print_one_lock_resource(res);
	}
	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);

	atomic_inc(&res->asts_reserved);
}

/*
 * used to drop the reserved ast, either because it went unused,
 * or because the ast/bast was actually called.
 *
 * also, if there is a pending migration on this lockres,
 * and this was the last pending ast on the lockres,
 * atomically set the MIGRATING flag before we drop the lock.
 * this is how we ensure that migration can proceed with no
 * asts in progress.  note that it is ok if the state of the
 * queues is such that a lock should be granted in the future
 * or that a bast should be fired, because the new master will
 * shuffle the lists on this lockres as soon as it is migrated.
 */
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
		return;

	if (!res->migration_pending) {
		spin_unlock(&res->spinlock);
		return;
	}

	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
	res->migration_pending = 0;
	res->state |= DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);
	wake_up(&dlm->migration_wq);
}