/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
#include <linux/compiler.h>
#include <linux/kthread.h>
static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
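/*
 * Layout of the tag stream parsed/produced by the generated functions
 * below (illustrative sketch, derived from the macros that follow):
 *
 *   +--------+--------+-- dlen bytes --+  ...  +--------+
 *   |  tag   |  dlen  |    payload     |       | TT_END |
 *   +--------+--------+----------------+  ...  +--------+
 *
 * Each tag word is 16 bits wide and combines tag_number(tag), the type
 * bits (TT_INTEGER, TT_INT64, TT_BIT, TT_STRING) and optionally
 * T_MANDATORY; an unknown mandatory tag aborts the parse, an unknown
 * optional one is skipped using dlen.
 */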
/* Generate the tag_list to struct functions */
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags( \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags( \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) { \
		dlen = get_unaligned(tags++); \
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				printk(KERN_ERR "drbd: Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags)); \
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags)); \
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			printk(KERN_ERR "drbd: arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include "linux/drbd_nl.h"
/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags( \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags( \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++); \
	put_unaligned(sizeof(int), tags++); \
	put_unaligned(arg->member, (int *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++); \
	put_unaligned(sizeof(u64), tags++); \
	put_unaligned(arg->member, (u64 *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++); \
	put_unaligned(sizeof(char), tags++); \
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++); \
	put_unaligned(arg->member ## _len, tags++); \
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include "linux/drbd_nl.h"
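/*
 * Note: linux/drbd_nl.h is pulled in twice, once under each set of
 * NL_* macro definitions above.  The first include generated a
 * <name>_from_tags() parser for every packet described in that header,
 * the second a matching <name>_to_tags() serializer; e.g.
 * primary_from_tags() is used by drbd_nl_primary() further below.
 */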
void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };
	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev->tconn)) {
		switch (((struct sockaddr *)mdev->tconn->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->tconn->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3]=af;
		envp[4]=ad;
		put_net_conf(mdev->tconn);
	}

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
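/*
 * call_usermodehelper() above waits for the helper to finish and
 * returns a wait(2)-style status word, so the helper's real exit code
 * lives in bits 8..15 -- hence the "(ret >> 8) & 0xff" above and in
 * drbd_try_outdate_peer() below.
 */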
enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		nps = mdev->state.pdsk;
		goto out;
	}

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);

out:
	if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
		/* The handler was not successful... unfreeze here, the
		   state engine can not unfreeze... */
		_drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
	}

	return nps;
}
static int _try_outdate_peer_async(void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)data;
	enum drbd_disk_state nps;
	union drbd_state ns;

	nps = drbd_try_outdate_peer(mdev);

	/* Not using
	   drbd_request_state(mdev, NS(pdsk, nps));
	   here, because we might have been able to re-establish the
	   connection in the meantime.  This can only partially be solved
	   in the state engine's is_valid_state() and
	   is_valid_state_transition() functions.

	   nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
	   pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
	   therefore we have to have the pre state change check here.
	*/
	spin_lock_irq(&mdev->tconn->req_lock);
	ns = mdev->state;
	if (ns.conn < C_WF_REPORT_PARAMS) {
		ns.pdsk = nps;
		_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

	return 0;
}
void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
{
	struct task_struct *opa;

	opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
	if (IS_ERR(opa))
		dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
}
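/*
 * State changes below are requested as a (mask, val) pair: mask selects
 * which fields of the union drbd_state may change (e.g. mask.role =
 * R_MASK), val carries the new values for exactly those fields; all
 * other fields are left untouched by the state engine.
 */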
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto fail;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk  = nps;

			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			schedule_timeout_interruptible((mdev->tconn->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev->tconn)) {
			mdev->tconn->net_conf->want_lose = 0;
			put_net_conf(mdev->tconn);
		}
		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	mutex_unlock(mdev->state_mutex);
	return rv;
}
static struct drbd_conf *ensure_mdev(int minor, int create)
{
	struct drbd_conf *mdev;

	if (minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(minor);

	if (!mdev && create) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[minor] == NULL) {
			minor_table[minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(minor);
	}

	return mdev;
}
static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);

	return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}
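/*
 * Rough sizing sketch (assuming the usual 4 KiB bitmap granularity):
 * the on-disk bitmap needs one bit per 4 KiB block, i.e. about 32 KiB
 * of bitmap per GiB of backing storage.  The computation above rounds
 * that up to full bitmap extents and a multiple of 8 sectors before
 * adding room for the super block and the activity log (MD_BM_OFFSET).
 */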
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
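/*
 * Usage example (arithmetic checked by hand): ppsize(buf, 1048576)
 * shifts the 1048576 KB input down one unit to 1024 and returns
 * "1024 MB"; the loop stops below 10000 or at the largest unit, so
 * the longest possible result is the "16384 EB" mentioned above.
 */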
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (is_susp(mdev->state))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
/**
 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns unchanged, shrunk or grew on success; dev_size_error (a
 * negative value) if the new size could not be established.  You
 * should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
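/*
 * In short: if both the local and the peer size are known, the smaller
 * of the two wins; otherwise the last agreed size is used, capped by
 * whichever size is known.  A user-requested size (u_size) is only
 * honored if it does not exceed what both nodes can provide.
 */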
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if the current al lru is still in use, -ENOMEM when
 * allocation failed, and 0 on success. You should call drbd_md_sync()
 * after calling this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (!expect(mdev->sync_conf.al_extents >= DRBD_AL_EXTENTS_MIN))
		mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_MIN;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		max_segments = mdev->ldev->dc.max_bio_bvecs;
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}
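/*
 * Note: blk_queue_stack_limits() above makes DRBD's queue inherit the
 * backing device's limits, so upper layers never issue BIOs the lower
 * device could not handle; the read-ahead setting is copied as well so
 * both queues behave alike.
 */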
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   Because from DRBD 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = mdev->peer_max_bio_size;
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * Wait for a potentially exiting worker, then restart it,
 * or start a new one.  Flush any pending work, there may still be an
 * after_state_change queued.
 */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	wait_event(tconn->ping_wait, !test_and_set_bit(CONFIG_PENDING, &tconn->flags));
	wait_event(tconn->ping_wait, !test_bit(OBJECT_DYING, &tconn->flags));
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	spin_lock_irq(&tconn->req_lock);
	if (conn_all_vols_unconf(tconn)) {
		set_bit(OBJECT_DYING, &tconn->flags);
		drbd_thread_stop_nowait(&tconn->worker);
	} else
		clear_bit(CONFIG_PENDING, &tconn->flags);
	spin_unlock_irq(&tconn->req_lock);
	wake_up(&tconn->ping_wait);
}
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}
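/*
 * drbd_suspend_al() is used from drbd_nl_disk_conf() below: when every
 * block is already marked out of sync, per-extent activity log updates
 * carry no additional information, so they are suspended until the
 * device resynchronizes.
 */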
/* does always return 0;
 * interesting return code is in reply->ret_code */
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	int cp_discovered = 0;

	conn_reconfig_start(mdev->tconn);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing       = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	if (!disk_conf_from_tags(nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	if (get_net_conf(mdev->tconn)) {
		int prot = mdev->tconn->net_conf->wire_protocol;
		put_net_conf(mdev->tconn);
		if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;
	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  (nbc->dc.meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			       1, 61, sizeof(struct bm_extent),
			       offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				"meta data may help <<==\n");
	}
	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_FUA, &mdev->flags);
	else
		clear_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere.  */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_reconsider_max_bio_size(mdev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determine_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK) < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		if (drbd_bitmap_io(mdev, &drbd_bm_write,
			"crashed primary apply AL", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */
	spin_lock_irq(&mdev->tconn->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if ( ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	conn_reconfig_done(mdev->tconn);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
	drbd_md_sync(mdev);
 fail:
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	conn_reconfig_done(mdev->tconn);
	return 0;
}
/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_code retcode;
	int ret;

	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
	/* D_FAILED will transition to DISKLESS. */
	ret = wait_event_interruptible(mdev->misc_wait,
			mdev->state.disk != D_FAILED);
	drbd_resume_io(mdev);

	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
	reply->ret_code = retcode;
	return 0;
}
static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	int i;
	enum drbd_ret_code retcode;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	struct drbd_conf *odev;
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

	conn_reconfig_start(mdev->tconn);

	if (mdev->state.conn > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	new_conf->timeout	   = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int	   = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers	   = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size	   = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size	   = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count	   = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p	   = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p	   = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p	   = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose	   = 0;
	new_conf->two_primaries    = 0;
	new_conf->wire_protocol    = DRBD_PROT_C;
	new_conf->ping_timeo	   = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict	   = DRBD_RR_CONFLICT_DEF;
	new_conf->on_congestion    = DRBD_ON_CONGESTION_DEF;
	new_conf->cong_extents     = DRBD_CONG_EXTENTS_DEF;

	if (!net_conf_from_tags(nlp->tag_list, new_conf)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	if (get_ldev(mdev)) {
		enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
		if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
		retcode = ERR_CONG_NOT_PROTO_A;
		goto fail;
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev->tconn)) {
			taken_addr = (struct sockaddr *)&odev->tconn->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->tconn->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->tconn->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->tconn->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev->tconn);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}
  1235. if (new_conf->cram_hmac_alg[0] != 0) {
  1236. snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
  1237. new_conf->cram_hmac_alg);
  1238. tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
  1239. if (IS_ERR(tfm)) {
  1240. tfm = NULL;
  1241. retcode = ERR_AUTH_ALG;
  1242. goto fail;
  1243. }
  1244. if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
  1245. retcode = ERR_AUTH_ALG_ND;
  1246. goto fail;
  1247. }
  1248. }
  1249. if (new_conf->integrity_alg[0]) {
  1250. integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
  1251. if (IS_ERR(integrity_w_tfm)) {
  1252. integrity_w_tfm = NULL;
  1253. retcode=ERR_INTEGRITY_ALG;
  1254. goto fail;
  1255. }
  1256. if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
  1257. retcode=ERR_INTEGRITY_ALG_ND;
  1258. goto fail;
  1259. }
  1260. integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
  1261. if (IS_ERR(integrity_r_tfm)) {
  1262. integrity_r_tfm = NULL;
  1263. retcode=ERR_INTEGRITY_ALG;
  1264. goto fail;
  1265. }
  1266. }
  1267. ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
  1268. if (integrity_w_tfm) {
  1269. i = crypto_hash_digestsize(integrity_w_tfm);
  1270. int_dig_out = kmalloc(i, GFP_KERNEL);
  1271. if (!int_dig_out) {
  1272. retcode = ERR_NOMEM;
  1273. goto fail;
  1274. }
  1275. int_dig_in = kmalloc(i, GFP_KERNEL);
  1276. if (!int_dig_in) {
  1277. retcode = ERR_NOMEM;
  1278. goto fail;
  1279. }
  1280. int_dig_vv = kmalloc(i, GFP_KERNEL);
  1281. if (!int_dig_vv) {
  1282. retcode = ERR_NOMEM;
  1283. goto fail;
  1284. }
  1285. }
  1286. if (!mdev->bitmap) {
  1287. if(drbd_bm_init(mdev)) {
  1288. retcode = ERR_NOMEM;
  1289. goto fail;
  1290. }
  1291. }
  1292. drbd_flush_workqueue(mdev);
  1293. spin_lock_irq(&mdev->tconn->req_lock);
  1294. if (mdev->tconn->net_conf != NULL) {
  1295. retcode = ERR_NET_CONFIGURED;
  1296. spin_unlock_irq(&mdev->tconn->req_lock);
  1297. goto fail;
  1298. }
  1299. mdev->tconn->net_conf = new_conf;
  1300. mdev->send_cnt = 0;
  1301. mdev->recv_cnt = 0;
  1302. crypto_free_hash(mdev->tconn->cram_hmac_tfm);
  1303. mdev->tconn->cram_hmac_tfm = tfm;
  1304. crypto_free_hash(mdev->tconn->integrity_w_tfm);
  1305. mdev->tconn->integrity_w_tfm = integrity_w_tfm;
  1306. crypto_free_hash(mdev->tconn->integrity_r_tfm);
  1307. mdev->tconn->integrity_r_tfm = integrity_r_tfm;
  1308. kfree(mdev->tconn->int_dig_out);
  1309. kfree(mdev->tconn->int_dig_in);
  1310. kfree(mdev->tconn->int_dig_vv);
  1311. mdev->tconn->int_dig_out=int_dig_out;
  1312. mdev->tconn->int_dig_in=int_dig_in;
  1313. mdev->tconn->int_dig_vv=int_dig_vv;
  1314. retcode = _conn_request_state(mdev->tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
  1315. spin_unlock_irq(&mdev->tconn->req_lock);
  1316. kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
  1317. reply->ret_code = retcode;
  1318. conn_reconfig_done(mdev->tconn);
  1319. return 0;
  1320. fail:
  1321. kfree(int_dig_out);
  1322. kfree(int_dig_in);
  1323. kfree(int_dig_vv);
  1324. crypto_free_hash(tfm);
  1325. crypto_free_hash(integrity_w_tfm);
  1326. crypto_free_hash(integrity_r_tfm);
  1327. kfree(new_conf);
  1328. reply->ret_code = retcode;
  1329. conn_reconfig_done(mdev->tconn);
  1330. return 0;
  1331. }
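
/*
 * drbd_nl_disconnect() - Request transition to C_DISCONNECTING.
 *
 * A forced disconnect takes the short path under req_lock.  Otherwise the
 * cluster-wide state change may need a retry: SS_PRIMARY_NOP means our
 * state engine wants the peer's disk marked D_OUTDATED along with the
 * disconnect, SS_CW_FAILED_BY_PEER means the peer wants to see our own
 * disk outdated instead.  On success, wait until the connection has
 * actually left C_DISCONNECTING.
 */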
static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	struct drbd_tconn *tconn = mdev->tconn;
	int retcode;
	struct disconnect dc;

	memset(&dc, 0, sizeof(struct disconnect));
	if (!disconnect_from_tags(nlp->tag_list, &dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (dc.force) {
		spin_lock_irq(&tconn->req_lock);
		if (tconn->cstate >= C_WF_CONNECTION)
			_conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		spin_unlock_irq(&tconn->req_lock);
		goto done;
	}

	retcode = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
							pdsk, D_OUTDATED), CS_VERBOSE);
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED), 0);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(tconn->ping_wait,
				     tconn->cstate != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the mean time! */
		retcode = ERR_INTR;
		goto fail;
	}

done:
	retcode = NO_ERROR;
fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}
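
/*
 * resync_after_online_grow() - Start resyncing the storage added by a grow.
 *
 * When the roles differ, the primary node becomes the sync source; with
 * equal roles, the DISCARD_CONCURRENT connection flag breaks the tie.
 */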
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
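
/*
 * drbd_nl_resize() - Change the size of the lower level device.
 *
 * Refused while a resync is running or while both nodes are secondary.
 * Skipping the resync of the new area (no_resync) needs protocol 93 or
 * later.  After drbd_determine_dev_size(), a grow while connected sets
 * RESIZE_PENDING and announces the new UUIDs and sizes to the peer.
 */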
static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	struct resize rs;
	int retcode = NO_ERROR;
	enum determine_dev_size dd;
	enum dds_flags ddsf;

	memset(&rs, 0, sizeof(struct resize));
	if (!resize_from_tags(nlp->tag_list, &rs)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		put_ldev(mdev); /* drop the reference taken just above */
		goto fail;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

fail:
	reply->ret_code = retcode;
	return 0;
}
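
/*
 * drbd_nl_syncer_conf() - Set the syncer configuration.
 *
 * The checksum algorithm cannot change while a resync runs, nor the
 * verify algorithm during an online verify.  New csums/verify transforms
 * and the resync-rate fifo are allocated before peer_seq_lock is taken,
 * so the swap inside the lock cannot fail.  Afterwards the activity log
 * is resized, the new sync parameters are sent to the peer, and the CPU
 * mask of the DRBD threads is updated if it changed.
 */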
static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;
	int *rs_plan_s = NULL;
	int fifo_size;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate = DRBD_RATE_DEF;
		sc.after = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
		sc.on_no_data = DRBD_ON_NO_DATA_DEF;
		sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
		sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
		sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
		sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
		sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	if (!syncer_conf_from_tags(nlp->tag_list, &sc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	/* re-sync running */
	rsr = (mdev->state.conn == C_SYNC_SOURCE ||
	       mdev->state.conn == C_SYNC_TARGET ||
	       mdev->state.conn == C_PAUSED_SYNC_S ||
	       mdev->state.conn == C_PAUSED_SYNC_T);

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				     cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	if (!expect(sc.rate >= 1))
		sc.rate = 1;

	/* clip to allowed range */
	if (!expect(sc.al_extents >= DRBD_AL_EXTENTS_MIN))
		sc.al_extents = DRBD_AL_EXTENTS_MIN;
	if (!expect(sc.al_extents <= DRBD_AL_EXTENTS_MAX))
		sc.al_extents = DRBD_AL_EXTENTS_MAX;

	/* to avoid spurious errors when configuring minors before configuring
	 * the minors they depend on: if necessary, first create the minor we
	 * depend on */
	if (sc.after >= 0)
		ensure_mdev(sc.after, 1);

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, sc.after);
	if (retcode != NO_ERROR)
		goto fail;

	fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kzalloc of fifo_buffer failed\n");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	/* ok, assign the rest of it as well.
	 * lock against receive_SyncParam() */
	spin_lock(&mdev->peer_seq_lock);
	mdev->sync_conf = sc;

	if (!rsr) {
		crypto_free_hash(mdev->csums_tfm);
		mdev->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}

	if (!ovr) {
		crypto_free_hash(mdev->verify_tfm);
		mdev->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size = fifo_size;
		mdev->rs_planed = 0;
		rs_plan_s = NULL;
	}

	spin_unlock(&mdev->peer_seq_lock);

	if (get_ldev(mdev)) {
		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
		drbd_al_shrink(mdev);
		err = drbd_check_al_size(mdev);
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);
		put_ldev(mdev);
		drbd_md_sync(mdev);

		if (err) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev, &sc);

	if (!cpumask_equal(mdev->tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(mdev->tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(mdev->tconn);
		mdev->tconn->receiver.reset_cpu_mask = 1;
		mdev->tconn->asender.reset_cpu_mask = 1;
		mdev->tconn->worker.reset_cpu_mask = 1;
	}

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	kfree(rs_plan_s);
	free_cpumask_var(new_cpu_mask);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	reply->ret_code = retcode;
	return 0;
}
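
/*
 * drbd_nl_invalidate() - Make the local disk inconsistent and resync from
 *	the peer.
 *
 * Waits for pending bitmap IO, then requests C_STARTING_SYNC_T.  If the
 * state engine answers SS_NEED_CONNECTION there is no peer to sync from;
 * in that case only mark the local disk D_INCONSISTENT, rechecking under
 * req_lock in case a connection appeared concurrently.
 */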
static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->tconn->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}
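
/*
 * drbd_nl_invalidate_peer() - Make the peer's disk inconsistent and
 *	resync to it.
 *
 * If there is no connection but we are primary, the peer would get a
 * resync upon connect anyway; turn that into a full resync by marking
 * pdsk D_INCONSISTENT, setting all bits in the bitmap, and suspending
 * the activity log.
 */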
static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				   struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);

	if (retcode < SS_SUCCESS) {
		if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
			/* The peer will get a resync upon connect anyways. Just make that
			   into a full resync. */
			retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
			if (retcode >= SS_SUCCESS) {
				if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
						   "set_n_write from invalidate_peer",
						   BM_LOCKED_SET_ALLOWED))
					retcode = ERR_IO_MD_DISK;
			}
		} else
			retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	union drbd_state s;

	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));
	return 0;
}
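
/*
 * drbd_nl_resume_io() - Resume IO after it was suspended.
 *
 * Generates a new current UUID if one is still pending, clears all three
 * suspend reasons (user, no-data, fencing), and, while IO is still held
 * back, cleans up the transfer log: requests are cleared if there is no
 * connection, or restarted if the local disk failed while IO was frozen.
 */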
static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (reply->ret_code == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);
	return 0;
}

static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
	return 0;
}

static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = disk_conf_to_tags(&mdev->ldev->dc, tl);
		put_ldev(mdev);
	}

	if (get_net_conf(mdev->tconn)) {
		tl = net_conf_to_tags(mdev->tconn->net_conf, tl);
		put_net_conf(mdev->tconn);
	}
	tl = syncer_conf_to_tags(&mdev->sync_conf, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl = reply->tag_list;
	union drbd_state s = mdev->state;
	unsigned long rs_left;
	unsigned int res;

	tl = get_state_to_tags((struct get_state *)&s, tl);

	/* no local ref, no bitmap, no syncer progress. */
	if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
		if (get_ldev(mdev)) {
			drbd_get_syncer_progress(mdev, &rs_left, &res);
			tl = tl_add_int(tl, T_sync_progress, &res);
			put_ldev(mdev);
		}
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
		put_ldev(mdev);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

/**
 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
 * @mdev: DRBD device.
 * @nlp: Netlink/connector packet from drbdsetup
 * @reply: Reply packet for drbdsetup
 */
static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				    struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;
	char rv;

	tl = reply->tag_list;

	rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
	     test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;

	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	/* default to resume from last known position, if possible */
	struct start_ov args =
		{ .start_sector = mdev->ov_start_sector };

	if (!start_ov_from_tags(nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	/* w_make_ov_request expects position to be aligned */
	mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT - 1);

	reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	return 0;
}
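
/*
 * drbd_nl_new_c_uuid() - Generate a new current UUID, rotating the old
 *	one into the history.
 *
 * With --clear-bitmap on a just created, connected device this becomes
 * the "skip initial sync" shortcut: the bitmap is cleared on disk, the
 * peer is told to skip the initial sync, and both disks go D_UP_TO_DATE.
 * Serialized against other state changes via state_mutex.
 */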
static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid args;

	memset(&args, 0, sizeof(struct new_c_uuid));
	if (!new_c_uuid_from_tags(nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);

	reply->ret_code = retcode;
	return 0;
}
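
/*
 * Dispatch table for the connector interface: one entry per packet type,
 * indexed by the P_* packet number sent by drbdsetup.  reply_body_size is
 * the worst-case tag-list payload the handler may generate, on top of the
 * fixed-size reply header allocated in drbd_connector_callback().
 */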
struct cn_handler_struct {
	int (*function)(struct drbd_conf *,
			struct drbd_nl_cfg_req *,
			struct drbd_nl_cfg_reply *);
	int reply_body_size;
};

static struct cn_handler_struct cnd_table[] = {
	[ P_primary ] = { &drbd_nl_primary, 0 },
	[ P_secondary ] = { &drbd_nl_secondary, 0 },
	[ P_disk_conf ] = { &drbd_nl_disk_conf, 0 },
	[ P_detach ] = { &drbd_nl_detach, 0 },
	[ P_net_conf ] = { &drbd_nl_net_conf, 0 },
	[ P_disconnect ] = { &drbd_nl_disconnect, 0 },
	[ P_resize ] = { &drbd_nl_resize, 0 },
	[ P_syncer_conf ] = { &drbd_nl_syncer_conf, 0 },
	[ P_invalidate ] = { &drbd_nl_invalidate, 0 },
	[ P_invalidate_peer ] = { &drbd_nl_invalidate_peer, 0 },
	[ P_pause_sync ] = { &drbd_nl_pause_sync, 0 },
	[ P_resume_sync ] = { &drbd_nl_resume_sync, 0 },
	[ P_suspend_io ] = { &drbd_nl_suspend_io, 0 },
	[ P_resume_io ] = { &drbd_nl_resume_io, 0 },
	[ P_outdate ] = { &drbd_nl_outdate, 0 },
	[ P_get_config ] = { &drbd_nl_get_config,
			     sizeof(struct syncer_conf_tag_len_struct) +
			     sizeof(struct disk_conf_tag_len_struct) +
			     sizeof(struct net_conf_tag_len_struct) },
	[ P_get_state ] = { &drbd_nl_get_state,
			    sizeof(struct get_state_tag_len_struct) +
			    sizeof(struct sync_progress_tag_len_struct) },
	[ P_get_uuids ] = { &drbd_nl_get_uuids,
			    sizeof(struct get_uuids_tag_len_struct) },
	[ P_get_timeout_flag ] = { &drbd_nl_get_timeout_flag,
				   sizeof(struct get_timeout_flag_tag_len_struct) },
	[ P_start_ov ] = { &drbd_nl_start_ov, 0 },
	[ P_new_c_uuid ] = { &drbd_nl_new_c_uuid, 0 },
};
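
/*
 * drbd_connector_callback() - Entry point for requests arriving via connector.
 *
 * Checks CAP_SYS_ADMIN, looks up (or creates) the minor, dispatches to
 * the handler from cnd_table, and sends the reply back with the request's
 * sequence number and ack+1, as the connector protocol expects.  The
 * module reference guards against unload while a request is in flight.
 */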
static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
{
	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
	struct cn_handler_struct *cm;
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct drbd_conf *mdev;
	int retcode, rr;
	int reply_size = sizeof(struct cn_msg)
		+ sizeof(struct drbd_nl_cfg_reply)
		+ sizeof(short int);

	if (!try_module_get(THIS_MODULE)) {
		printk(KERN_ERR "drbd: try_module_get() failed!\n");
		return;
	}

	if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
		retcode = ERR_PERM;
		goto fail;
	}

	mdev = ensure_mdev(nlp->drbd_minor,
			(nlp->flags & DRBD_NL_CREATE_DEVICE));
	if (!mdev) {
		retcode = ERR_MINOR_INVALID;
		goto fail;
	}

	if (nlp->packet_type >= P_nl_after_last_packet ||
	    nlp->packet_type == P_return_code_only) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	cm = cnd_table + nlp->packet_type;

	/* This may happen if packet number is 0: */
	if (cm->function == NULL) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	reply_size += cm->reply_body_size;

	/* allocation not in the IO path, cqueue thread context */
	cn_reply = kzalloc(reply_size, GFP_KERNEL);
	if (!cn_reply) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;

	reply->packet_type =
		cm->reply_body_size ? nlp->packet_type : P_return_code_only;
	reply->minor = nlp->drbd_minor;
	reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
	/* reply->tag_list; might be modified by cm->function. */

	rr = cm->function(mdev, nlp, reply);

	cn_reply->id = req->id;
	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
	cn_reply->flags = 0;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);

	kfree(cn_reply);
	module_put(THIS_MODULE);
	return;
fail:
	drbd_nl_send_reply(req, retcode);
	module_put(THIS_MODULE);
}
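
/*
 * drbd_nl_seq numbers the unsolicited broadcasts below.  Reply payloads
 * are encoded as a tag list: a 16bit tag number, a 16bit length, then the
 * payload, terminated by TT_END.  The tl_add_* helpers append one tag
 * each and return the advanced write position; put_unaligned() is used
 * because variable length payloads leave the position unaligned.
 */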
static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */

static unsigned short *
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
	unsigned short len, int nul_terminated)
{
	unsigned short l = tag_descriptions[tag_number(tag)].max_len;
	len = (len < l) ? len : l;
	put_unaligned(tag, tl++);
	put_unaligned(len, tl++);
	memcpy(tl, data, len);
	tl = (unsigned short *)((char *)tl + len);
	if (nul_terminated)
		*((char *)tl - 1) = 0;
	return tl;
}

static unsigned short *
tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
{
	return __tl_add_blob(tl, tag, data, len, 0);
}

static unsigned short *
tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
{
	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
}

static unsigned short *
tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
{
	put_unaligned(tag, tl++);
	switch (tag_type(tag)) {
	case TT_INTEGER:
		put_unaligned(sizeof(int), tl++);
		put_unaligned(*(int *)val, (int *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(int));
		break;
	case TT_INT64:
		put_unaligned(sizeof(u64), tl++);
		put_unaligned(*(u64 *)val, (u64 *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(u64));
		break;
	default:
		/* someone did something stupid. */
		;
	}
	return tl;
}
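
/*
 * drbd_bcast_state() - Broadcast a state change to userspace.
 *
 * Sent unsolicited (own sequence number, ack unused) from a stack
 * buffer; cn_netlink_send() is called with GFP_NOIO since state changes
 * can happen in contexts that must not recurse into the writeout path.
 */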
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct get_state_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */

	tl = get_state_to_tags((struct get_state *)&state, tl);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_inc_return(&drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_get_state;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct call_helper_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */

	tl = tl_add_str(tl, T_helper, helper_name);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_inc_return(&drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_call_helper;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
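
/*
 * drbd_bcast_ee() - Broadcast a peer request (e.g. a digest mismatch)
 *	to userspace, including up to 32KiB of the affected data.
 *
 * Runs in receiver thread context: not in our own writeout path, but
 * possibly in that of the peer, hence the GFP_NOIO allocation to avoid
 * a distributed deadlock.
 */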
void drbd_bcast_ee(struct drbd_conf *mdev, const char *reason, const int dgs,
		   const char *seen_hash, const char *calc_hash,
		   const struct drbd_peer_request *peer_req)
{
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	unsigned short *tl;
	struct page *page;
	unsigned len;

	if (!peer_req)
		return;
	if (!reason || !reason[0])
		return;

	/* apparently we have to memcpy twice, first to prepare the data for the
	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
	 * netlink skb. */
	/* receiver thread context, which is not in the writeout path (of this node),
	 * but may be in the writeout path of the _other_ node.
	 * GFP_NOIO to avoid potential "distributed deadlock". */
	cn_reply = kzalloc(
		sizeof(struct cn_msg)+
		sizeof(struct drbd_nl_cfg_reply)+
		sizeof(struct dump_ee_tag_len_struct)+
		sizeof(short int),
		GFP_NOIO);

	if (!cn_reply) {
		dev_err(DEV, "could not kzalloc buffer for drbd_bcast_ee, "
			     "sector %llu, size %u\n",
			(unsigned long long)peer_req->i.sector,
			peer_req->i.size);
		return;
	}

	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
	tl = reply->tag_list;

	tl = tl_add_str(tl, T_dump_ee_reason, reason);
	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
	tl = tl_add_int(tl, T_ee_sector, &peer_req->i.sector);
	tl = tl_add_int(tl, T_ee_block_id, &peer_req->block_id);

	/* dump the first 32k */
	len = min_t(unsigned, peer_req->i.size, 32 << 10);
	put_unaligned(T_ee_data, tl++);
	put_unaligned(len, tl++);

	page = peer_req->pages;
	page_chain_for_each(page) {
		void *d = kmap_atomic(page, KM_USER0);
		unsigned l = min_t(unsigned, len, PAGE_SIZE);
		memcpy(tl, d, l);
		kunmap_atomic(d, KM_USER0);
		tl = (unsigned short *)((char *)tl + l);
		len -= l;
		if (len == 0)
			break;
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_inc_return(&drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_dump_ee;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	kfree(cn_reply);
}

void drbd_bcast_sync_progress(struct drbd_conf *mdev)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct sync_progress_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;
	unsigned long rs_left;
	unsigned int res;

	/* no local ref, no bitmap, no syncer progress, no broadcast. */
	if (!get_ldev(mdev))
		return;
	drbd_get_syncer_progress(mdev, &rs_left, &res);
	put_ldev(mdev);

	tl = tl_add_int(tl, T_sync_progress, &res);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_inc_return(&drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_sync_progress;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
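
/*
 * drbd_nl_init() - Register the connector callback at module load time.
 *
 * If the requested connector index is already taken, probe up to ten
 * alternatives, stepping by CN_IDX_STEP.
 */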
int __init drbd_nl_init(void)
{
	static struct cb_id cn_id_drbd;
	int err, try = 10;

	cn_id_drbd.val = CN_VAL_DRBD;
	do {
		cn_id_drbd.idx = cn_idx;
		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
		if (!err)
			break;
		cn_idx = (cn_idx + CN_IDX_STEP);
	} while (try--);

	if (err) {
		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
		return err;
	}

	return 0;
}

void drbd_nl_cleanup(void)
{
	static struct cb_id cn_id_drbd;

	cn_id_drbd.idx = cn_idx;
	cn_id_drbd.val = CN_VAL_DRBD;

	cn_del_callback(&cn_id_drbd);
}
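
/*
 * drbd_nl_send_reply() - Send a bare return code back to userspace.
 *
 * Used on the error paths of drbd_connector_callback(), before any
 * handler-specific reply buffer exists; the reply is P_return_code_only
 * with an empty tag list.
 */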
void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
{
	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	int rr;

	memset(buffer, 0, sizeof(buffer));
	cn_reply->id = req->id;

	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
	cn_reply->flags = 0;

	reply->packet_type = P_return_code_only;
	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
	reply->ret_code = ret_code;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
}