drbd_nl.c

/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
#include <linux/compiler.h>
#include <linux/kthread.h>

static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
/* Generate the tag_list to struct functions */
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) { \
		dlen = get_unaligned(tags++); \
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
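
/* Tag list layout, as consumed by the generated _from_tags functions
 * above: a flat stream of 16 bit words,
 *   [ tag | dlen | <dlen bytes of payload> ] ... [ TT_END ]
 * where the tag word encodes the tag number, the tag type and the
 * T_MANDATORY flag; unknown optional tags are skipped via dlen. */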
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags)); \
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags)); \
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include "linux/drbd_nl.h"

/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++); \
	put_unaligned(sizeof(int), tags++); \
	put_unaligned(arg->member, (int *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++); \
	put_unaligned(sizeof(u64), tags++); \
	put_unaligned(arg->member, (u64 *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++); \
	put_unaligned(sizeof(char), tags++); \
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++); \
	put_unaligned(arg->member ## _len, tags++); \
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include "linux/drbd_nl.h"

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };
	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev)) {
		switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3] = af;
		envp[4] = ad;
		put_net_conf(mdev);
	}

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
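
/* Note: call_usermodehelper() packs the helper's exit status into the
 * upper byte of its return value, hence the (ret >> 8) & 0xff above;
 * negative return values are kernel-internal errors and are dropped. */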
enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		return mdev->state.pdsk;
	}

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);
	return nps;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)data;
	enum drbd_disk_state nps;

	nps = drbd_try_outdate_peer(mdev);
	drbd_request_state(mdev, NS(pdsk, nps));

	return 0;
}

void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
{
	struct task_struct *opa;

	opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
	if (IS_ERR(opa))
		dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
}
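
/* drbd_set_role() retries the role change up to max_tries times,
 * refining the request on specific failures: it may force the local
 * disk UpToDate (if the caller allows it), try to outdate the peer,
 * or briefly wait for a dead peer to be detected before the final
 * attempt. */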
int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	int r = 0;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev); /* Detect a dead peer ASAP */

	mutex_lock(&mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (r == SS_NOTHING_TO_DO)
			goto fail;
		if (r == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk = nps;

			continue;
		}
		if (r == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (r < SS_SUCCESS) {
			r = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (r < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (r < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, TRUE);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev)) {
			mdev->net_conf->want_lose = 0;
			put_net_conf(mdev);
		}
		set_disk_ro(mdev->vdisk, FALSE);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(mdev);
		}
	}

	if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
		drbd_al_to_on_disk_bm(mdev);
		put_ldev(mdev);
	}

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
	mutex_unlock(&mdev->state_mutex);
	return r;
}

static struct drbd_conf *ensure_mdev(int minor, int create)
{
	struct drbd_conf *mdev;

	if (minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(minor);

	if (!mdev && create) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[minor] == NULL) {
			minor_table[minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(minor);
	}

	return mdev;
}

static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);

	return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_MAX_SIZE;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}
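
/* Note on the internal/flex-internal case above: the super block sector
 * (md_offset) is located via drbd_md_ss__(); al_offset and bm_offset are
 * sector offsets relative to it (negative means in front of it on the
 * device), and md_size_sect covers bitmap, activity log and super block
 * together, with the bitmap portion sized from the backing capacity. */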
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max. */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%lu %cB", (long)size, units[base]);

	return buf;
}
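
/* Example: callers pass a size in KB (sectors >> 1); ppsize(buf, 1048576)
 * yields "1024 MB" -- each loop iteration divides by 1024 with rounding
 * and advances one unit (K, M, G, ...). */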

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (mdev->state.susp)
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determin_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
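
/* Sizing precedence, as implemented above: if both the local and the
 * peer size are known, take the minimum; otherwise fall back to the
 * last agreed size, capped by whichever side is known. A user-requested
 * size is only honored when it does not exceed the computed size. */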

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	ERR_IF(mdev->sync_conf.al_extents < 7)
		mdev->sync_conf.al_extents = 127;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
{
	struct request_queue * const q = mdev->rq_queue;
	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
	int max_segments = mdev->ldev->dc.max_bio_bvecs;

	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);

	blk_queue_max_hw_sectors(q, max_seg_s >> 9);
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_max_segment_size(q, max_seg_s);
	blk_queue_logical_block_size(q, 512);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	blk_stack_limits(&q->limits, &b->limits, 0);

	if (b->merge_bvec_fn)
		dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n",
		     b->merge_bvec_fn);
	dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));

	if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
		dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
		     q->backing_dev_info.ra_pages,
		     b->backing_dev_info.ra_pages);
		q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
	}
}

/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * Wait for a potentially exiting worker, then restart it,
 * or start a new one.  Flush any pending work, there may still be an
 * after_state_change queued.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
	drbd_thread_start(&mdev->worker);
	drbd_flush_workqueue(mdev);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_thread_stop_nowait(&mdev->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}

/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (lc_try_lock(mdev->act_log)) {
		drbd_al_shrink(mdev);
		lc_unlock(mdev->act_log);
	} else {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);

	spin_unlock_irq(&mdev->req_lock);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}

/* does always return 0;
 * interesting return code is in reply->ret_code */
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_codes retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct inode *inode, *inode2;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	int rv;
	int cp_discovered = 0;
	int logical_block_size;

	drbd_reconfig_start(mdev);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing       = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	if (get_net_conf(mdev)) {
		int prot = mdev->net_conf->wire_protocol;
		put_net_conf(mdev);
		if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
	if (IS_ERR(nbc->lo_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
		    PTR_ERR(nbc->lo_file));
		nbc->lo_file = NULL;
		retcode = ERR_OPEN_DISK;
		goto fail;
	}

	inode = nbc->lo_file->f_dentry->d_inode;

	if (!S_ISBLK(inode->i_mode)) {
		retcode = ERR_DISK_NOT_BDEV;
		goto fail;
	}

	nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
	if (IS_ERR(nbc->md_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
		    PTR_ERR(nbc->md_file));
		nbc->md_file = NULL;
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}

	inode2 = nbc->md_file->f_dentry->d_inode;

	if (!S_ISBLK(inode2->i_mode)) {
		retcode = ERR_MD_NOT_BDEV;
		goto fail;
	}

	nbc->backing_bdev = inode->i_bdev;
	if (bd_claim(nbc->backing_bdev, mdev)) {
		printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
		       nbc->backing_bdev, mdev,
		       nbc->backing_bdev->bd_holder,
		       nbc->backing_bdev->bd_contains->bd_holder,
		       nbc->backing_bdev->bd_holders);
		retcode = ERR_BDCLAIM_DISK;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto release_bdev_fail;
	}

	/* meta_dev_idx >= 0: external fixed size,
	 * possibly multiple drbd sharing one meta device.
	 * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
	 * not yet used by some other drbd minor!
	 * (if you use drbd.conf + drbdadm,
	 * that should check it for you already; but if you don't, or someone
	 * fooled it, we need to double check here) */
	nbc->md_bdev = inode2->i_bdev;
	if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
				: (void *) drbd_m_holder)) {
		retcode = ERR_BDCLAIM_MD_DISK;
		goto release_bdev_fail;
	}

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto release_bdev2_fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto release_bdev2_fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				      "meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || mdev->state.susp);
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	drbd_resume_io(mdev);
	if (retcode < SS_SUCCESS)
		goto release_bdev2_fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	/* allocate a second IO page if logical_block_size != 512 */
	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	if (logical_block_size != MD_SECTOR_SIZE) {
		if (!mdev->md_io_tmpp) {
			struct page *page = alloc_page(GFP_NOIO);
			if (!page)
				goto force_diskless_dec;

			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
			     logical_block_size, MD_SECTOR_SIZE);
			dev_warn(DEV, "Workaround engaged (has performance impact).\n");

			mdev->md_io_tmpp = page;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_BARRIER, &mdev->flags);
	else
		clear_bit(MD_NO_BARRIER, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere.  */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bio_barrier;
	drbd_bump_write_ordering(mdev, WO_bio_barrier);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->state.susp &&
	      mdev->sync_conf.on_no_data == OND_SUSPEND_IO)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_setup_queue_param(mdev, mdev->state.conn == C_CONNECTED &&
				     mdev->agreed_pro_version < 95 ?
				     DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determin_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		drbd_al_to_on_disk_bm(mdev);
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if ( ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	drbd_md_sync(mdev);
 release_bdev2_fail:
	if (nbc)
		bd_release(nbc->md_bdev);
 release_bdev_fail:
	if (nbc)
		bd_release(nbc->backing_bdev);
 fail:
	if (nbc) {
		if (nbc->lo_file)
			fput(nbc->lo_file);
		if (nbc->md_file)
			fput(nbc->md_file);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}
  1094. static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
  1095. struct drbd_nl_cfg_reply *reply)
  1096. {
  1097. int i, ns;
  1098. enum drbd_ret_codes retcode;
  1099. struct net_conf *new_conf = NULL;
  1100. struct crypto_hash *tfm = NULL;
  1101. struct crypto_hash *integrity_w_tfm = NULL;
  1102. struct crypto_hash *integrity_r_tfm = NULL;
  1103. struct hlist_head *new_tl_hash = NULL;
  1104. struct hlist_head *new_ee_hash = NULL;
  1105. struct drbd_conf *odev;
  1106. char hmac_name[CRYPTO_MAX_ALG_NAME];
  1107. void *int_dig_out = NULL;
  1108. void *int_dig_in = NULL;
  1109. void *int_dig_vv = NULL;
  1110. struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
  1111. drbd_reconfig_start(mdev);
  1112. if (mdev->state.conn > C_STANDALONE) {
  1113. retcode = ERR_NET_CONFIGURED;
  1114. goto fail;
  1115. }
  1116. /* allocation not in the IO path, cqueue thread context */
  1117. new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
  1118. if (!new_conf) {
  1119. retcode = ERR_NOMEM;
  1120. goto fail;
  1121. }
  1122. new_conf->timeout = DRBD_TIMEOUT_DEF;
  1123. new_conf->try_connect_int = DRBD_CONNECT_INT_DEF;
  1124. new_conf->ping_int = DRBD_PING_INT_DEF;
  1125. new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF;
  1126. new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF;
  1127. new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
  1128. new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF;
  1129. new_conf->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF;
  1130. new_conf->ko_count = DRBD_KO_COUNT_DEF;
  1131. new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF;
  1132. new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF;
  1133. new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF;
  1134. new_conf->want_lose = 0;
  1135. new_conf->two_primaries = 0;
  1136. new_conf->wire_protocol = DRBD_PROT_C;
  1137. new_conf->ping_timeo = DRBD_PING_TIMEO_DEF;
  1138. new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF;
  1139. if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
  1140. retcode = ERR_MANDATORY_TAG;
  1141. goto fail;
  1142. }
  1143. if (new_conf->two_primaries
  1144. && (new_conf->wire_protocol != DRBD_PROT_C)) {
  1145. retcode = ERR_NOT_PROTO_C;
  1146. goto fail;
  1147. }
  1148. if (get_ldev(mdev)) {
  1149. enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
  1150. put_ldev(mdev);
		if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev)) {
			taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	ns = new_conf->max_epoch_size/8;
	if (mdev->tl_hash_s != ns) {
		new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_tl_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	ns = new_conf->max_buffers/8;
	if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
		new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_ee_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_out = kmalloc(i, GFP_KERNEL);
		if (!int_dig_out) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	drbd_flush_workqueue(mdev);
	spin_lock_irq(&mdev->req_lock);
	if (mdev->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&mdev->req_lock);
		goto fail;
	}
	mdev->net_conf = new_conf;

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;

	if (new_tl_hash) {
		kfree(mdev->tl_hash);
		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
		mdev->tl_hash = new_tl_hash;
	}

	if (new_ee_hash) {
		kfree(mdev->ee_hash);
		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
		mdev->ee_hash = new_ee_hash;
	}

	crypto_free_hash(mdev->cram_hmac_tfm);
	mdev->cram_hmac_tfm = tfm;

	crypto_free_hash(mdev->integrity_w_tfm);
	mdev->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(mdev->integrity_r_tfm);
	mdev->integrity_r_tfm = integrity_r_tfm;

	kfree(mdev->int_dig_out);
	kfree(mdev->int_dig_in);
	kfree(mdev->int_dig_vv);
	mdev->int_dig_out = int_dig_out;
	mdev->int_dig_in = int_dig_in;
	mdev->int_dig_vv = int_dig_vv;
	retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->req_lock);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

fail:
	kfree(int_dig_out);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_tl_hash);
	kfree(new_ee_hash);
	kfree(new_conf);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

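/* Tear down the connection: request C_DISCONNECTING, and depending on the
 * outcome of the state handshake, additionally mark the peer's disk (or,
 * if the peer insists, our own) as Outdated before going StandAlone. */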
static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
						       pdsk, D_OUTDATED));
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED),
					      CS_ORDERED);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(mdev->state_wait,
				     mdev->state.conn != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		 * someone else might connect us in the meantime! */
		retcode = ERR_INTR;
		goto fail;
	}

done:
	retcode = NO_ERROR;
fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}

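/* After an online grow, the new storage area needs a resync.  If the roles
 * differ, the Primary becomes sync source; with equal roles, the
 * DISCARD_CONCURRENT flag breaks the tie. */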
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}

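/* Handle "drbdsetup resize": refuse while a resync is running or while both
 * nodes are Secondary, re-read the backing device's size, and re-run
 * drbd_determin_dev_size() with the flags requested by the user. */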
static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	struct resize rs;
	int retcode = NO_ERROR;
	enum determine_dev_size dd;
	enum dds_flags ddsf;

	memset(&rs, 0, sizeof(struct resize));
	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		put_ldev(mdev);
		goto fail;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determin_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

fail:
	reply->ret_code = retcode;
	return 0;
}

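/* Handle "drbdsetup syncer": validate and apply new sync parameters,
 * including csums/verify algorithms, CPU affinity mask, the resync rate
 * controller fifo, and the activity log size. */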
static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;
	int *rs_plan_s = NULL;
	int fifo_size;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate = DRBD_RATE_DEF;
		sc.after = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
		sc.on_no_data = DRBD_ON_NO_DATA_DEF;
		sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
		sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
		sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
		sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
		sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	/* re-sync running */
	rsr = (mdev->state.conn == C_SYNC_SOURCE ||
	       mdev->state.conn == C_SYNC_TARGET ||
	       mdev->state.conn == C_PAUSED_SYNC_S ||
	       mdev->state.conn == C_PAUSED_SYNC_T);

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				     cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	ERR_IF (sc.rate < 1) sc.rate = 1;
	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
	if (sc.al_extents > AL_MAX) {
		dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
		sc.al_extents = AL_MAX;
	}
#undef AL_MAX

	/* to avoid spurious errors when configuring minors before configuring
	 * the minors they depend on: if necessary, first create the minor we
	 * depend on */
	if (sc.after >= 0)
		ensure_mdev(sc.after, 1);

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, sc.after);
	if (retcode != NO_ERROR)
		goto fail;

	fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kzalloc of fifo_buffer failed\n");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	/* ok, assign the rest of it as well.
	 * lock against receive_SyncParam() */
	spin_lock(&mdev->peer_seq_lock);
	mdev->sync_conf = sc;

	if (!rsr) {
		crypto_free_hash(mdev->csums_tfm);
		mdev->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}

	if (!ovr) {
		crypto_free_hash(mdev->verify_tfm);
		mdev->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size = fifo_size;
		mdev->rs_planed = 0;
		rs_plan_s = NULL;
	}

	spin_unlock(&mdev->peer_seq_lock);

	if (get_ldev(mdev)) {
		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
		drbd_al_shrink(mdev);
		err = drbd_check_al_size(mdev);
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);

		put_ldev(mdev);
		drbd_md_sync(mdev);

		if (err) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev, &sc);

	if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
		cpumask_copy(mdev->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(mdev);
		mdev->receiver.reset_cpu_mask = 1;
		mdev->asender.reset_cpu_mask = 1;
		mdev->worker.reset_cpu_mask = 1;
	}

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	kfree(rs_plan_s);
	free_cpumask_var(new_cpu_mask);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	reply->ret_code = retcode;
	return 0;
}

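/* Handle "drbdsetup invalidate": start a full resync with this node as
 * sync target; if we are not connected, just mark the local disk
 * Inconsistent. */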
static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

	reply->ret_code = retcode;
	return 0;
}

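/* Bitmap-io worker: set all bits out of sync and write the bitmap out,
 * then suspend the activity log. */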
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

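/* Handle "drbdsetup invalidate-remote": start a full resync with this node
 * as sync source.  A disconnected Primary just marks the peer's disk
 * Inconsistent and sets the whole bitmap, so the resync happens on the
 * next connect. */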
static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				   struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);

	if (retcode < SS_SUCCESS) {
		if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
			/* The peer will get a resync upon connect anyway.
			 * Just make that into a full resync. */
			retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
			if (retcode >= SS_SUCCESS) {
				/* open coded drbd_bitmap_io() */
				if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
						   "set_n_write from invalidate_peer"))
					retcode = ERR_IO_MD_DISK;
			}
		} else
			retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_CLEAR;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));
	return 0;
}

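/* Resume frozen IO: generate the deferred new current UUID if one is
 * pending, clear the susp flag, and clean up or restart requests that
 * were stuck in the transfer log. */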
static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		drbd_md_sync(mdev);
	}
	drbd_suspend_io(mdev);
	reply->ret_code = drbd_request_state(mdev, NS(susp, 0));
	if (reply->ret_code == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev, fail_frozen_disk_io);
	}
	drbd_resume_io(mdev);
	return 0;
}

static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
	return 0;
}

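/* Fill the reply with the current disk, net and syncer configuration,
 * serialized as a tag list; returns the tag list length in bytes. */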
static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
		put_net_conf(mdev);
	}
	tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

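/* Report the current device state, plus sync progress while a resync is
 * running, as a tag list. */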
static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl = reply->tag_list;
	union drbd_state s = mdev->state;
	unsigned long rs_left;
	unsigned int res;

	tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);

	/* no local ref, no bitmap, no syncer progress. */
	if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
		if (get_ldev(mdev)) {
			drbd_get_syncer_progress(mdev, &rs_left, &res);
			tl = tl_add_int(tl, T_sync_progress, &res);
			put_ldev(mdev);
		}
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

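/* Report the on-disk UUID set and UUID flags as a tag list. */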
static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
		put_ldev(mdev);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

/**
 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 */
static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				    struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;
	char rv;

	tl = reply->tag_list;

	rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
	     test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;

	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

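/* Handle "drbdsetup verify": start online verify at the requested sector,
 * defaulting to where the last run left off. */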
static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	/* default to resume from last known position, if possible */
	struct start_ov args =
		{ .start_sector = mdev->ov_start_sector };

	if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}
	/* w_make_ov_request expects position to be aligned:
	 * round down to the bitmap-bit boundary, not "& ~BM_SECT_PER_BIT",
	 * which would only clear a single bit. */
	mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1);
	reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	return 0;
}

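/* Handle "drbdsetup new-current-uuid": rotate in a new current UUID.  With
 * --clear-bitmap on a freshly created, connected pair this skips the
 * initial sync by declaring both disks UpToDate. */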
static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid args;

	memset(&args, 0, sizeof(struct new_c_uuid));
	if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(&mdev->state_mutex);

	reply->ret_code = retcode;
	return 0;
}

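/* Dispatch table for incoming connector packets.  reply_body_size reserves
 * room in the reply for handlers that return a tag list. */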
struct cn_handler_struct {
	int (*function)(struct drbd_conf *,
			struct drbd_nl_cfg_req *,
			struct drbd_nl_cfg_reply *);
	int reply_body_size;
};

static struct cn_handler_struct cnd_table[] = {
	[ P_primary ]		= { &drbd_nl_primary,		0 },
	[ P_secondary ]		= { &drbd_nl_secondary,		0 },
	[ P_disk_conf ]		= { &drbd_nl_disk_conf,		0 },
	[ P_detach ]		= { &drbd_nl_detach,		0 },
	[ P_net_conf ]		= { &drbd_nl_net_conf,		0 },
	[ P_disconnect ]	= { &drbd_nl_disconnect,	0 },
	[ P_resize ]		= { &drbd_nl_resize,		0 },
	[ P_syncer_conf ]	= { &drbd_nl_syncer_conf,	0 },
	[ P_invalidate ]	= { &drbd_nl_invalidate,	0 },
	[ P_invalidate_peer ]	= { &drbd_nl_invalidate_peer,	0 },
	[ P_pause_sync ]	= { &drbd_nl_pause_sync,	0 },
	[ P_resume_sync ]	= { &drbd_nl_resume_sync,	0 },
	[ P_suspend_io ]	= { &drbd_nl_suspend_io,	0 },
	[ P_resume_io ]		= { &drbd_nl_resume_io,		0 },
	[ P_outdate ]		= { &drbd_nl_outdate,		0 },
	[ P_get_config ]	= { &drbd_nl_get_config,
				    sizeof(struct syncer_conf_tag_len_struct) +
				    sizeof(struct disk_conf_tag_len_struct) +
				    sizeof(struct net_conf_tag_len_struct) },
	[ P_get_state ]		= { &drbd_nl_get_state,
				    sizeof(struct get_state_tag_len_struct) +
				    sizeof(struct sync_progress_tag_len_struct) },
	[ P_get_uuids ]		= { &drbd_nl_get_uuids,
				    sizeof(struct get_uuids_tag_len_struct) },
	[ P_get_timeout_flag ]	= { &drbd_nl_get_timeout_flag,
				    sizeof(struct get_timeout_flag_tag_len_struct) },
	[ P_start_ov ]		= { &drbd_nl_start_ov,		0 },
	[ P_new_c_uuid ]	= { &drbd_nl_new_c_uuid,	0 },
};

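/* Connector entry point: authenticate the sender, look up (or create) the
 * addressed minor, run the handler from cnd_table, and send the reply back
 * via netlink. */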
static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
{
	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
	struct cn_handler_struct *cm;
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct drbd_conf *mdev;
	int retcode, rr;
	int reply_size = sizeof(struct cn_msg)
		+ sizeof(struct drbd_nl_cfg_reply)
		+ sizeof(short int);

	if (!try_module_get(THIS_MODULE)) {
		printk(KERN_ERR "drbd: try_module_get() failed!\n");
		return;
	}

	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
		retcode = ERR_PERM;
		goto fail;
	}

	mdev = ensure_mdev(nlp->drbd_minor,
			(nlp->flags & DRBD_NL_CREATE_DEVICE));
	if (!mdev) {
		retcode = ERR_MINOR_INVALID;
		goto fail;
	}

	if (nlp->packet_type >= P_nl_after_last_packet) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	cm = cnd_table + nlp->packet_type;

	/* This may happen if packet number is 0: */
	if (cm->function == NULL) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	reply_size += cm->reply_body_size;

	/* allocation not in the IO path, cqueue thread context */
	cn_reply = kmalloc(reply_size, GFP_KERNEL);
	if (!cn_reply) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;

	reply->packet_type =
		cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
	reply->minor = nlp->drbd_minor;
	reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
	/* reply->tag_list; might be modified by cm->function. */

	rr = cm->function(mdev, nlp, reply);

	cn_reply->id = req->id;
	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
	cn_reply->flags = 0;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);

	kfree(cn_reply);
	module_put(THIS_MODULE);
	return;
fail:
	drbd_nl_send_reply(req, retcode);
	module_put(THIS_MODULE);
}

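/* Helpers to serialize values into the tag list format that drbdsetup
 * expects in replies and broadcast events. */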
static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */

static unsigned short *
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
	      unsigned short len, int nul_terminated)
{
	unsigned short l = tag_descriptions[tag_number(tag)].max_len;
	len = (len < l) ? len : l;
	put_unaligned(tag, tl++);
	put_unaligned(len, tl++);
	memcpy(tl, data, len);
	tl = (unsigned short *)((char *)tl + len);
	if (nul_terminated)
		*((char *)tl - 1) = 0;
	return tl;
}

static unsigned short *
tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
{
	return __tl_add_blob(tl, tag, data, len, 0);
}

static unsigned short *
tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
{
	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
}

static unsigned short *
tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
{
	put_unaligned(tag, tl++);
	switch (tag_type(tag)) {
	case TT_INTEGER:
		put_unaligned(sizeof(int), tl++);
		put_unaligned(*(int *)val, (int *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(int));
		break;
	case TT_INT64:
		put_unaligned(sizeof(u64), tl++);
		put_unaligned(*(u64 *)val, (u64 *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(u64));
		break;
	default:
		/* someone did something stupid. */
		;
	}
	return tl;
}

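/* Broadcast a state change to userspace event listeners. */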
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct get_state_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */

	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_get_state;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct call_helper_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */

	tl = tl_add_str(tl, T_helper, helper_name);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_call_helper;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

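/* Dump an epoch entry to userspace: the reason string, the digest we saw
 * and the digest we calculated, plus the block data itself; used to report
 * blocks whose data integrity digests disagree. */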
void drbd_bcast_ee(struct drbd_conf *mdev,
		const char *reason, const int dgs,
		const char *seen_hash, const char *calc_hash,
		const struct drbd_epoch_entry *e)
{
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	unsigned short *tl;
	struct page *page;
	unsigned len;

	if (!e)
		return;
	if (!reason || !reason[0])
		return;

	/* apparently we have to memcpy twice, first to prepare the data for the
	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
	 * netlink skb. */
	/* receiver thread context, which is not in the writeout path (of this node),
	 * but may be in the writeout path of the _other_ node.
	 * GFP_NOIO to avoid potential "distributed deadlock". */
	cn_reply = kmalloc(
		sizeof(struct cn_msg)+
		sizeof(struct drbd_nl_cfg_reply)+
		sizeof(struct dump_ee_tag_len_struct)+
		sizeof(short int),
		GFP_NOIO);

	if (!cn_reply) {
		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
			(unsigned long long)e->sector, e->size);
		return;
	}

	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
	tl = reply->tag_list;

	tl = tl_add_str(tl, T_dump_ee_reason, reason);
	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
	tl = tl_add_int(tl, T_ee_sector, &e->sector);
	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);

	put_unaligned(T_ee_data, tl++);
	put_unaligned(e->size, tl++);

	len = e->size;
	page = e->pages;
	page_chain_for_each(page) {
		void *d = kmap_atomic(page, KM_USER0);
		unsigned l = min_t(unsigned, len, PAGE_SIZE);
		memcpy(tl, d, l);
		kunmap_atomic(d, KM_USER0);
		tl = (unsigned short *)((char *)tl + l);
		len -= l;
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_dump_ee;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	kfree(cn_reply);
}

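/* Broadcast current resync progress to userspace event listeners. */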
void drbd_bcast_sync_progress(struct drbd_conf *mdev)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct sync_progress_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;
	unsigned long rs_left;
	unsigned int res;

	/* no local ref, no bitmap, no syncer progress, no broadcast. */
	if (!get_ldev(mdev))
		return;
	drbd_get_syncer_progress(mdev, &rs_left, &res);
	put_ldev(mdev);

	tl = tl_add_int(tl, T_sync_progress, &res);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_sync_progress;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

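/* Module init for the connector interface: register the callback, stepping
 * cn_idx a few times in case the index is already taken. */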
int __init drbd_nl_init(void)
{
	static struct cb_id cn_id_drbd;
	int err, try = 10;

	cn_id_drbd.val = CN_VAL_DRBD;
	do {
		cn_id_drbd.idx = cn_idx;
		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
		if (!err)
			break;
		cn_idx = (cn_idx + CN_IDX_STEP);
	} while (try--);

	if (err) {
		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
		return err;
	}

	return 0;
}

void drbd_nl_cleanup(void)
{
	static struct cb_id cn_id_drbd;

	cn_id_drbd.idx = cn_idx;
	cn_id_drbd.val = CN_VAL_DRBD;

	cn_del_callback(&cn_id_drbd);
}

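/* Send a minimal reply that carries only a return code; used when request
 * processing fails before a handler-specific reply can be built. */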
void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
{
	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	int rr;

	cn_reply->id = req->id;

	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
	cn_reply->flags = 0;

	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
	reply->ret_code = ret_code;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
}