/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
#include <linux/compiler.h>
#include <linux/kthread.h>

static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Generate the tag_list to struct functions */
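/*
 * A tag list is a flat stream of (u16 tag, u16 dlen, payload[dlen])
 * records, terminated by a TT_END tag.  tag_number() extracts the tag
 * number from the low bits; the remaining bits carry the value type
 * (TT_INTEGER, TT_INT64, TT_BIT, TT_STRING) and the T_MANDATORY flag.
 */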
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) { \
		dlen = get_unaligned(tags++); \
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
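/*
 * Illustration (assuming the corresponding declaration in drbd_nl.h):
 *   NL_PACKET(primary, 1, NL_BIT(1, T_MANDATORY, primary_force))
 * expands to a primary_from_tags() that walks the tag stream and fills
 * in struct primary; unknown optional tags are skipped, unknown
 * mandatory tags abort the parse.
 */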
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags)); \
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags)); \
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include "linux/drbd_nl.h"

/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++); \
	put_unaligned(sizeof(int), tags++); \
	put_unaligned(arg->member, (int *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++); \
	put_unaligned(sizeof(u64), tags++); \
	put_unaligned(arg->member, (u64 *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++); \
	put_unaligned(sizeof(char), tags++); \
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++); \
	put_unaligned(arg->member ## _len, tags++); \
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include "linux/drbd_nl.h"

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };
	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev)) {
		switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3] = af;
		envp[4] = ad;
		put_net_conf(mdev);
	}

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
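	/* wait == 1 (UMH_WAIT_PROC) blocks until the helper exits and
	 * returns a wait()-style status; the exit code is recovered
	 * below via (ret >> 8) & 0xff. */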
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		return mdev->state.pdsk;
	}

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);
	return nps;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)data;
	enum drbd_disk_state nps;

	nps = drbd_try_outdate_peer(mdev);
	drbd_request_state(mdev, NS(pdsk, nps));

	return 0;
}

void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
{
	struct task_struct *opa;

	opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
	if (IS_ERR(opa))
		dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
}

int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	int r = 0;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev); /* Detect a dead peer ASAP */

	mutex_lock(&mdev->state_mutex);
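	/* mask selects which fields of the state union to change,
	 * val carries the new values for exactly those fields */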
	mask.i = 0; mask.role = R_MASK;
	val.i = 0; val.role = new_role;

	while (try++ < max_tries) {
		r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (r == SS_NOTHING_TO_DO)
			goto fail;
		if (r == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk = nps;

			continue;
		}
		if (r == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (r < SS_SUCCESS) {
			r = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (r < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (r < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, TRUE);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev)) {
			mdev->net_conf->want_lose = 0;
			put_net_conf(mdev);
		}
		set_disk_ro(mdev->vdisk, FALSE);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
		drbd_al_to_on_disk_bm(mdev);
		put_ldev(mdev);
	}

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	mutex_unlock(&mdev->state_mutex);
	return r;
}

static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);

	return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
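/*
 * For the internal / flexible-internal indices below, the meta data
 * lives at the end of the backing device.  md_offset points at the meta
 * data super block, so al_offset and bm_offset come out negative:
 *
 *   | ... usable data ... | bitmap | activity log | md super block |
 *                                                        (end of device)
 */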
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_MAX_SIZE;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}

char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max. */
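	/* callers pass a value in KB; e.g. ppsize(buf, 2048000) yields "2000 MB" */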
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%lu %cB", (long)size, units[base]);

	return buf;
}

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (mdev->state.susp)
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determin_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns the size change as enum determine_dev_size (unchanged, shrunk
 * or grew), or dev_size_error on failure.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			(unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}
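/*
 * Size negotiation below: if both the peer's and my size are known, use
 * the minimum of the two; otherwise start from the last agreed size and
 * clamp it by whichever size is known.  A user-requested disk_size takes
 * precedence, as long as it does not exceed what is available.
 */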
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	ERR_IF(mdev->sync_conf.al_extents < 7)
		mdev->sync_conf.al_extents = 127;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
{
	struct request_queue * const q = mdev->rq_queue;
	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
	int max_segments = mdev->ldev->dc.max_bio_bvecs;
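	/* cap the requested max segment size by what the lower level
	 * device's queue advertises */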
	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);

	blk_queue_max_hw_sectors(q, max_seg_s >> 9);
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_max_segment_size(q, max_seg_s);
	blk_queue_logical_block_size(q, 512);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	blk_stack_limits(&q->limits, &b->limits, 0);

	if (b->merge_bvec_fn)
		dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n",
		     b->merge_bvec_fn);
	dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));

	if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
		dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
		     q->backing_dev_info.ra_pages,
		     b->backing_dev_info.ra_pages);
		q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
	}
}

/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * wait for a potentially exiting worker, then restart it,
 * or start a new one.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
	drbd_thread_start(&mdev->worker);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_thread_stop_nowait(&mdev->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}

/* always returns 0;
 * the interesting return code is in reply->ret_code */
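/*
 * Attach sequence: validate the new disk config, open and bd_claim the
 * backing and meta data devices, transition to D_ATTACHING, read the
 * meta data, (re)size the device, then decide the resulting disk state
 * under req_lock.
 */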
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_codes retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct inode *inode, *inode2;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	int rv;
	int cp_discovered = 0;
	int logical_block_size;

	drbd_reconfig_start(mdev);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	if (get_net_conf(mdev)) {
		int prot = mdev->net_conf->wire_protocol;
		put_net_conf(mdev);
		if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
	if (IS_ERR(nbc->lo_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
		    PTR_ERR(nbc->lo_file));
		nbc->lo_file = NULL;
		retcode = ERR_OPEN_DISK;
		goto fail;
	}

	inode = nbc->lo_file->f_dentry->d_inode;

	if (!S_ISBLK(inode->i_mode)) {
		retcode = ERR_DISK_NOT_BDEV;
		goto fail;
	}

	nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
	if (IS_ERR(nbc->md_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
		    PTR_ERR(nbc->md_file));
		nbc->md_file = NULL;
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}

	inode2 = nbc->md_file->f_dentry->d_inode;

	if (!S_ISBLK(inode2->i_mode)) {
		retcode = ERR_MD_NOT_BDEV;
		goto fail;
	}

	nbc->backing_bdev = inode->i_bdev;
	if (bd_claim(nbc->backing_bdev, mdev)) {
		printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
		       nbc->backing_bdev, mdev,
		       nbc->backing_bdev->bd_holder,
		       nbc->backing_bdev->bd_contains->bd_holder,
		       nbc->backing_bdev->bd_holders);
		retcode = ERR_BDCLAIM_DISK;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto release_bdev_fail;
	}

	/* meta_dev_idx >= 0: external fixed size,
	 * possibly multiple drbd sharing one meta device.
	 * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
	 * not yet used by some other drbd minor!
	 * (if you use drbd.conf + drbdadm,
	 * that should check it for you already; but if you don't, or someone
	 * fooled it, we need to double check here) */
	nbc->md_bdev = inode2->i_bdev;
	if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
				: (void *) drbd_m_holder)) {
		retcode = ERR_BDCLAIM_MD_DISK;
		goto release_bdev_fail;
	}

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto release_bdev2_fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}
	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto release_bdev2_fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				      "meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || mdev->state.susp);
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	drbd_resume_io(mdev);
	if (retcode < SS_SUCCESS)
		goto release_bdev2_fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	/* allocate a second IO page if logical_block_size != 512 */
	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	if (logical_block_size != MD_SECTOR_SIZE) {
		if (!mdev->md_io_tmpp) {
			struct page *page = alloc_page(GFP_NOIO);
			if (!page)
				goto force_diskless_dec;

			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
			     logical_block_size, MD_SECTOR_SIZE);
			dev_warn(DEV, "Workaround engaged (has performance impact).\n");

			mdev->md_io_tmpp = page;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_BARRIER, &mdev->flags);
	else
		clear_bit(MD_NO_BARRIER, &mdev->flags);
	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bio_barrier;
	drbd_bump_write_ordering(mdev, WO_bio_barrier);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->state.susp &&
	      mdev->sync_conf.on_no_data == OND_SUSPEND_IO)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determin_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		drbd_al_to_on_disk_bm(mdev);
	}

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if ( ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);
	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

force_diskless_dec:
	put_ldev(mdev);
force_diskless:
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	drbd_md_sync(mdev);
release_bdev2_fail:
	if (nbc)
		bd_release(nbc->md_bdev);
release_bdev_fail:
	if (nbc)
		bd_release(nbc->backing_bdev);
fail:
	if (nbc) {
		if (nbc->lo_file)
			fput(nbc->lo_file);
		if (nbc->md_file)
			fput(nbc->md_file);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}
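/* drbdsetup net: validate the requested net_conf, allocate the hash
 * tables and crypto transforms it needs up front, then install it and
 * move to C_UNCONNECTED under req_lock. */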
static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	int i, ns;
	enum drbd_ret_codes retcode;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	struct hlist_head *new_tl_hash = NULL;
	struct hlist_head *new_ee_hash = NULL;
	struct drbd_conf *odev;
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

	drbd_reconfig_start(mdev);

	if (mdev->state.conn > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	new_conf->timeout = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose = 0;
	new_conf->two_primaries = 0;
	new_conf->wire_protocol = DRBD_PROT_C;
	new_conf->ping_timeo = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF;

	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	if (get_ldev(mdev)) {
		enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
		if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev)) {
			taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}
	ns = new_conf->max_epoch_size/8;
	if (mdev->tl_hash_s != ns) {
		new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_tl_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	ns = new_conf->max_buffers/8;
	if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
		new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_ee_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_out = kmalloc(i, GFP_KERNEL);
		if (!int_dig_out) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	drbd_flush_workqueue(mdev);
	spin_lock_irq(&mdev->req_lock);
	if (mdev->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&mdev->req_lock);
		goto fail;
	}
	mdev->net_conf = new_conf;

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;

	if (new_tl_hash) {
		kfree(mdev->tl_hash);
		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
		mdev->tl_hash = new_tl_hash;
	}

	if (new_ee_hash) {
		kfree(mdev->ee_hash);
		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
		mdev->ee_hash = new_ee_hash;
	}

	crypto_free_hash(mdev->cram_hmac_tfm);
	mdev->cram_hmac_tfm = tfm;

	crypto_free_hash(mdev->integrity_w_tfm);
	mdev->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(mdev->integrity_r_tfm);
	mdev->integrity_r_tfm = integrity_r_tfm;

	kfree(mdev->int_dig_out);
	kfree(mdev->int_dig_in);
	kfree(mdev->int_dig_vv);
	mdev->int_dig_out = int_dig_out;
	mdev->int_dig_in = int_dig_in;
	mdev->int_dig_vv = int_dig_vv;
	retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->req_lock);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

fail:
	kfree(int_dig_out);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_tl_hash);
	kfree(new_ee_hash);
	kfree(new_conf);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}
static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
						       pdsk, D_OUTDATED));
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED),
					      CS_ORDERED);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(mdev->state_wait,
				     mdev->state.conn != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		 * someone else might connect us in the meantime! */
		retcode = ERR_INTR;
		goto fail;
	}

done:
	retcode = NO_ERROR;
fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}
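
/* After an online grow, the new area must be synced.  Pick the sync
 * source: the Primary if the roles differ, otherwise fall back to the
 * DISCARD_CONCURRENT tie-breaker bit. */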
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
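
/* Resize the device to the size requested by drbdsetup.  Refused while a
 * resync is running or while both nodes are Secondary; skipping the
 * resync of the new area (rs.no_resync) needs protocol version 93. */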
static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	struct resize rs;
	int retcode = NO_ERROR;
	enum determine_dev_size dd;
	enum dds_flags ddsf;

	memset(&rs, 0, sizeof(struct resize));
	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determin_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

fail:
	reply->ret_code = retcode;
	return 0;

fail_ldev:
	/* drop the reference taken by get_ldev() above */
	put_ldev(mdev);
	goto fail;
}
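
/* Update the syncer configuration.  New csums/verify hash algorithms may
 * not be set while a resync or online verify using them is running; the
 * cpu mask, activity log size and sync-after dependency are applied as
 * well. */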
static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate       = DRBD_RATE_DEF;
		sc.after      = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
		sc.on_no_data = DRBD_ON_NO_DATA_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	/* re-sync running */
	rsr = (mdev->state.conn == C_SYNC_SOURCE ||
	       mdev->state.conn == C_SYNC_TARGET ||
	       mdev->state.conn == C_PAUSED_SYNC_S ||
	       mdev->state.conn == C_PAUSED_SYNC_T);

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				     cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	ERR_IF (sc.rate < 1) sc.rate = 1;
	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
	if (sc.al_extents > AL_MAX) {
		dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
		sc.al_extents = AL_MAX;
	}
#undef AL_MAX

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, sc.after);
	if (retcode != NO_ERROR)
		goto fail;

	/* ok, assign the rest of it as well.
	 * lock against receive_SyncParam() */
	spin_lock(&mdev->peer_seq_lock);
	mdev->sync_conf = sc;

	if (!rsr) {
		crypto_free_hash(mdev->csums_tfm);
		mdev->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}

	if (!ovr) {
		crypto_free_hash(mdev->verify_tfm);
		mdev->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}
	spin_unlock(&mdev->peer_seq_lock);

	if (get_ldev(mdev)) {
		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
		drbd_al_shrink(mdev);
		err = drbd_check_al_size(mdev);
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);

		put_ldev(mdev);
		drbd_md_sync(mdev);

		if (err) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev, &sc);

	if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
		cpumask_copy(mdev->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(mdev);
		mdev->receiver.reset_cpu_mask = 1;
		mdev->asender.reset_cpu_mask = 1;
		mdev->worker.reset_cpu_mask = 1;
	}

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	free_cpumask_var(new_cpu_mask);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	reply->ret_code = retcode;
	return 0;
}
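
/* Invalidate the local disk and become sync target.  If no connection is
 * established, fall back to marking the local disk Inconsistent. */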
static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				   struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
	return 0;
}

static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_CLEAR;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));
	return 0;
}
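
/* Resume frozen IO.  If a new current UUID was queued while suspended,
 * generate it now; requests stuck on a lost connection or a failed disk
 * are cleared or restarted before IO is resumed. */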
static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		drbd_md_sync(mdev);
	}
	drbd_suspend_io(mdev);
	reply->ret_code = drbd_request_state(mdev, NS(susp, 0));
	if (reply->ret_code == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev, fail_frozen_disk_io);
	}
	drbd_resume_io(mdev);
	return 0;
}

static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
	return 0;
}
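
/* Dump the current disk, net and syncer configuration as a tag list into
 * the reply packet; the return value is the size of the tag list in
 * bytes. */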
static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
		put_net_conf(mdev);
	}
	tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}
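
/* Report the current device state and, while a resync is running, the
 * syncer progress. */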
static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl = reply->tag_list;
	union drbd_state s = mdev->state;
	unsigned long rs_left;
	unsigned int res;

	tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);

	/* no local ref, no bitmap, no syncer progress. */
	if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
		if (get_ldev(mdev)) {
			drbd_get_syncer_progress(mdev, &rs_left, &res);
			tl = tl_add_int(tl, T_sync_progress, &res);
			put_ldev(mdev);
		}
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
		put_ldev(mdev);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

/**
 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 */
static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				    struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;
	char rv;

	tl = reply->tag_list;

	rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;

	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}
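
/* Start an online verify run.  Unless a start sector is given in the tag
 * list, resume from the last known position. */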
static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	/* default to resume from last known position, if possible */
	struct start_ov args =
		{ .start_sector = mdev->ov_start_sector };

	if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}
	/* w_make_ov_request expects position to be aligned */
	mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
	reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	return 0;
}
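
/* Create a new current UUID.  When both nodes are connected, freshly
 * created, and clear_bm is requested, this implements "skip initial
 * sync": the bitmap is cleared and both disks go UpToDate at once. */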
static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid args;

	memset(&args, 0, sizeof(struct new_c_uuid));
	if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(&mdev->state_mutex);

	reply->ret_code = retcode;
	return 0;
}
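
/* Look up the device for a netlink request, optionally creating it on
 * the fly.  Creation races with concurrent requests for the same minor;
 * the loser frees its device again and uses the winner's. */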
static struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp)
{
	struct drbd_conf *mdev;

	if (nlp->drbd_minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(nlp->drbd_minor);

	if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(nlp->drbd_minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[nlp->drbd_minor] == NULL) {
			minor_table[nlp->drbd_minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(nlp->drbd_minor);
	}

	return mdev;
}
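
/* Dispatch table for the connector packets: one handler per packet type,
 * plus the extra space its reply body needs in the tag list. */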
struct cn_handler_struct {
	int (*function)(struct drbd_conf *,
			struct drbd_nl_cfg_req *,
			struct drbd_nl_cfg_reply *);
	int reply_body_size;
};

static struct cn_handler_struct cnd_table[] = {
	[ P_primary ]		= { &drbd_nl_primary,		0 },
	[ P_secondary ]		= { &drbd_nl_secondary,		0 },
	[ P_disk_conf ]		= { &drbd_nl_disk_conf,		0 },
	[ P_detach ]		= { &drbd_nl_detach,		0 },
	[ P_net_conf ]		= { &drbd_nl_net_conf,		0 },
	[ P_disconnect ]	= { &drbd_nl_disconnect,	0 },
	[ P_resize ]		= { &drbd_nl_resize,		0 },
	[ P_syncer_conf ]	= { &drbd_nl_syncer_conf,	0 },
	[ P_invalidate ]	= { &drbd_nl_invalidate,	0 },
	[ P_invalidate_peer ]	= { &drbd_nl_invalidate_peer,	0 },
	[ P_pause_sync ]	= { &drbd_nl_pause_sync,	0 },
	[ P_resume_sync ]	= { &drbd_nl_resume_sync,	0 },
	[ P_suspend_io ]	= { &drbd_nl_suspend_io,	0 },
	[ P_resume_io ]		= { &drbd_nl_resume_io,		0 },
	[ P_outdate ]		= { &drbd_nl_outdate,		0 },
	[ P_get_config ]	= { &drbd_nl_get_config,
				    sizeof(struct syncer_conf_tag_len_struct) +
				    sizeof(struct disk_conf_tag_len_struct) +
				    sizeof(struct net_conf_tag_len_struct) },
	[ P_get_state ]		= { &drbd_nl_get_state,
				    sizeof(struct get_state_tag_len_struct) +
				    sizeof(struct sync_progress_tag_len_struct) },
	[ P_get_uuids ]		= { &drbd_nl_get_uuids,
				    sizeof(struct get_uuids_tag_len_struct) },
	[ P_get_timeout_flag ]	= { &drbd_nl_get_timeout_flag,
				    sizeof(struct get_timeout_flag_tag_len_struct) },
	[ P_start_ov ]		= { &drbd_nl_start_ov,		0 },
	[ P_new_c_uuid ]	= { &drbd_nl_new_c_uuid,	0 },
};
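
/* Entry point for all configuration requests arriving via the connector:
 * check capability and packet type, look up the device, run the handler,
 * and send the reply back over netlink. */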
static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
{
	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
	struct cn_handler_struct *cm;
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct drbd_conf *mdev;
	int retcode, rr;
	int reply_size = sizeof(struct cn_msg)
		+ sizeof(struct drbd_nl_cfg_reply)
		+ sizeof(short int);

	if (!try_module_get(THIS_MODULE)) {
		printk(KERN_ERR "drbd: try_module_get() failed!\n");
		return;
	}

	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
		retcode = ERR_PERM;
		goto fail;
	}

	mdev = ensure_mdev(nlp);
	if (!mdev) {
		retcode = ERR_MINOR_INVALID;
		goto fail;
	}

	if (nlp->packet_type >= P_nl_after_last_packet) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	cm = cnd_table + nlp->packet_type;

	/* This may happen if packet number is 0: */
	if (cm->function == NULL) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	reply_size += cm->reply_body_size;

	/* allocation not in the IO path, cqueue thread context */
	cn_reply = kmalloc(reply_size, GFP_KERNEL);
	if (!cn_reply) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;

	reply->packet_type =
		cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
	reply->minor = nlp->drbd_minor;
	reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
	/* reply->tag_list; might be modified by cm->function. */

	rr = cm->function(mdev, nlp, reply);

	cn_reply->id = req->id;
	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
	cn_reply->flags = 0;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);

	kfree(cn_reply);
	module_put(THIS_MODULE);
	return;
fail:
	drbd_nl_send_reply(req, retcode);
	module_put(THIS_MODULE);
}

static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
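
/* Helpers to append tag/length/value entries to a reply's tag list.  The
 * blob variants clamp the length to the tag's declared maximum;
 * tl_add_int picks the value size from the tag's type. */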
static unsigned short *
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
	      unsigned short len, int nul_terminated)
{
	unsigned short l = tag_descriptions[tag_number(tag)].max_len;
	len = (len < l) ? len : l;
	put_unaligned(tag, tl++);
	put_unaligned(len, tl++);
	memcpy(tl, data, len);
	tl = (unsigned short *)((char *)tl + len);
	if (nul_terminated)
		*((char *)tl - 1) = 0;
	return tl;
}

static unsigned short *
tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
{
	return __tl_add_blob(tl, tag, data, len, 0);
}

static unsigned short *
tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
{
	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
}

static unsigned short *
tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
{
	put_unaligned(tag, tl++);
	switch (tag_type(tag)) {
	case TT_INTEGER:
		put_unaligned(sizeof(int), tl++);
		put_unaligned(*(int *)val, (int *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(int));
		break;
	case TT_INT64:
		put_unaligned(sizeof(u64), tl++);
		put_unaligned(*(u64 *)val, (u64 *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(u64));
		break;
	default:
		/* someone did something stupid. */
		;
	}
	return tl;
}
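
/* Broadcast a state change to userspace listeners via the connector. */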
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct get_state_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */

	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_get_state;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct call_helper_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */

	tl = tl_add_str(tl, T_helper, helper_name);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_call_helper;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
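
/* Broadcast a dump of an epoch entry (its data pages plus the seen and
 * calculated digests) to userspace, e.g. on a digest mismatch. */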
void drbd_bcast_ee(struct drbd_conf *mdev,
		   const char *reason, const int dgs,
		   const char *seen_hash, const char *calc_hash,
		   const struct drbd_epoch_entry *e)
{
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	unsigned short *tl;
	struct page *page;
	unsigned len;

	if (!e)
		return;
	if (!reason || !reason[0])
		return;

	/* apparently we have to memcpy twice, first to prepare the data for the
	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
	 * netlink skb. */

	/* receiver thread context, which is not in the writeout path (of this node),
	 * but may be in the writeout path of the _other_ node.
	 * GFP_NOIO to avoid potential "distributed deadlock". */
	cn_reply = kmalloc(
		sizeof(struct cn_msg)+
		sizeof(struct drbd_nl_cfg_reply)+
		sizeof(struct dump_ee_tag_len_struct)+
		sizeof(short int),
		GFP_NOIO);

	if (!cn_reply) {
		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
			(unsigned long long)e->sector, e->size);
		return;
	}

	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
	tl = reply->tag_list;

	tl = tl_add_str(tl, T_dump_ee_reason, reason);
	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
	tl = tl_add_int(tl, T_ee_sector, &e->sector);
	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);

	put_unaligned(T_ee_data, tl++);
	put_unaligned(e->size, tl++);

	len = e->size;
	page = e->pages;
	page_chain_for_each(page) {
		void *d = kmap_atomic(page, KM_USER0);
		unsigned l = min_t(unsigned, len, PAGE_SIZE);
		memcpy(tl, d, l);
		kunmap_atomic(d, KM_USER0);
		tl = (unsigned short *)((char *)tl + l);
		len -= l;
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_dump_ee;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	kfree(cn_reply);
}
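
/* Broadcast the current syncer progress to userspace; returns silently
 * when no local disk reference can be taken. */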
void drbd_bcast_sync_progress(struct drbd_conf *mdev)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct sync_progress_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;
	unsigned long rs_left;
	unsigned int res;

	/* no local ref, no bitmap, no syncer progress, no broadcast. */
	if (!get_ldev(mdev))
		return;
	drbd_get_syncer_progress(mdev, &rs_left, &res);
	put_ldev(mdev);
	tl = tl_add_int(tl, T_sync_progress, &res);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_sync_progress;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
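
/* Register the connector callback.  If the index is already taken, retry
 * with the next candidate index a limited number of times. */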
int __init drbd_nl_init(void)
{
	static struct cb_id cn_id_drbd;
	int err, try = 10;

	cn_id_drbd.val = CN_VAL_DRBD;
	do {
		cn_id_drbd.idx = cn_idx;
		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
		if (!err)
			break;
		cn_idx = (cn_idx + CN_IDX_STEP);
	} while (try--);

	if (err) {
		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
		return err;
	}

	return 0;
}

void drbd_nl_cleanup(void)
{
	static struct cb_id cn_id_drbd;

	cn_id_drbd.idx = cn_idx;
	cn_id_drbd.val = CN_VAL_DRBD;

	cn_del_callback(&cn_id_drbd);
}
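
/* Send a bare reply carrying only a return code; used for requests that
 * failed before their handler could run. */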
void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
{
	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	int rr;

	cn_reply->id = req->id;

	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
	cn_reply->flags = 0;

	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
	reply->ret_code = ret_code;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
}