drbd_nl.c
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
#include <linux/compiler.h>
#include <linux/kthread.h>

static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
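
/* The two macro blocks below use the "X-macro" technique: linux/drbd_nl.h
 * describes every netlink packet once as NL_PACKET(name, number, fields),
 * and is included twice with different definitions of NL_PACKET, NL_INTEGER,
 * NL_INT64, NL_BIT and NL_STRING.  The first pass generates one
 * <name>_from_tags() parser per packet, the second pass one
 * <name>_to_tags() serializer. */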
/* Generate the tag_list to struct functions */
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) { \
		dlen = get_unaligned(tags++); \
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags)); \
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags)); \
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include "linux/drbd_nl.h"
/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++);	\
	put_unaligned(sizeof(int), tags++);		\
	put_unaligned(arg->member, (int *)tags);	\
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++);	\
	put_unaligned(sizeof(u64), tags++);		\
	put_unaligned(arg->member, (u64 *)tags);	\
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++);	\
	put_unaligned(sizeof(char), tags++);		\
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++);	\
	put_unaligned(arg->member ## _len, tags++);	\
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include "linux/drbd_nl.h"
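
/* As the generated functions above show, a tag stream is a sequence of
 * records: a 16-bit tag word (tag number, type bits and the T_MANDATORY
 * flag), a 16-bit payload length dlen, then dlen bytes of payload; the
 * stream is terminated by TT_END.  Unknown tags are skipped unless they
 * are marked mandatory. */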
void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };
	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev)) {
		switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3] = af;
		envp[4] = ad;
		put_net_conf(mdev);
	}

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
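
/* Exit codes of the fence-peer helper, as interpreted by the switch in
 * drbd_try_outdate_peer() below (the exit status sits in the high byte of
 * the usermodehelper return value, hence the (r>>8) & 0xff):
 *   3 - peer is inconsistent or worse
 *   4 - peer was fenced/outdated
 *   5 - peer was down
 *   6 - peer is primary; we voluntarily outdate ourselves
 *   7 - peer was stonithed
 * anything else means the helper script is broken. */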
enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		nps = mdev->state.pdsk;
		goto out;
	}

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will have created a new UUID anyway... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);

out:
	if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
		/* The handler was not successful... unfreeze here; the
		   state engine cannot unfreeze... */
		_drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
	}

	return nps;
}
static int _try_outdate_peer_async(void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)data;
	enum drbd_disk_state nps;

	nps = drbd_try_outdate_peer(mdev);
	drbd_request_state(mdev, NS(pdsk, nps));

	return 0;
}

void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
{
	struct task_struct *opa;

	opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
	if (IS_ERR(opa))
		dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
}
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev); /* Detect a dead peer ASAP */

	mutex_lock(&mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto fail;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk  = nps;

			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
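
	/* Note: the least significant bit of the current-data UUID is used
	 * as a flag below: cleared when we become Secondary, set when we
	 * become Primary (the same manipulation appears in
	 * drbd_nl_disk_conf()), so the on-disk UUID records whether the
	 * data was last touched by a Primary. */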
	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev)) {
			mdev->net_conf->want_lose = 0;
			put_net_conf(mdev);
		}
		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
		drbd_al_to_on_disk_bm(mdev);
		put_ldev(mdev);
	}

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
	mutex_unlock(&mdev->state_mutex);
	return rv;
}
static struct drbd_conf *ensure_mdev(int minor, int create)
{
	struct drbd_conf *mdev;

	if (minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(minor);

	if (!mdev && create) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[minor] == NULL) {
			minor_table[minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(minor);
	}

	return mdev;
}
static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);

	return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_MAX_SIZE;
		/* we need (slightly less than) ~ this many bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}
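
/* Pretty-print a size.  Callers pass a value in KB (see the
 * ppsize(ppb, size>>1) call site below, where size is in 512-byte
 * sectors); each loop iteration divides by 1024, rounding at 512, and
 * moves to the next unit, e.g. ppsize(buf, 1<<20) yields "1024 MB". */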
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max. */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%lu %cB", (long)size, units[base]);

	return buf;
}
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (is_susp(mdev->state))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
/**
 * drbd_determin_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns an enum determine_dev_size: dev_size_error on failure, otherwise
 * unchanged, shrunk or grew.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}
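
/* Pick the new device size: if both our size and the peer's size are
 * known, take the minimum; otherwise fall back to the last agreed size,
 * clipped by whichever side is known.  A user-requested size may only
 * shrink the result, never grow it beyond what is available.
 * Example: peer unknown (p_size == 0), m_size == 2000, la_size == 1000
 * gives 1000; once connected with p_size == 1500, it gives 1500. */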
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if the current al lru is still in use, -ENOMEM when
 * allocation failed, and 0 on success. You should call drbd_md_sync()
 * after you called this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	ERR_IF(mdev->sync_conf.al_extents < 7)
		mdev->sync_conf.al_extents = 127;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
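
/* Derive our request queue limits from the backing device's queue:
 * max_hw_sectors is capped at max_bio_size, the segment count is limited
 * by the configured max-bio-bvecs workaround (if any), the remaining
 * limits are stacked from below, and we adopt the backing device's
 * read-ahead setting. */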
void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local)
{
	struct request_queue * const q = mdev->rq_queue;
	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
	int max_segments = mdev->ldev->dc.max_bio_bvecs;
	int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
	blk_queue_stack_limits(q, b);

	dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9);

	if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
		dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
			 q->backing_dev_info.ra_pages,
			 b->backing_dev_info.ra_pages);
		q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
	}
}
/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * Wait for a potentially exiting worker, then restart it,
 * or start a new one.  Flush any pending work, there may still be an
 * after_state_change queued.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
	drbd_thread_start(&mdev->worker);
	drbd_flush_workqueue(mdev);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_thread_stop_nowait(&mdev->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (lc_try_lock(mdev->act_log)) {
		drbd_al_shrink(mdev);
		lc_unlock(mdev->act_log);
	} else {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);

	spin_unlock_irq(&mdev->req_lock);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}
/* always returns 0;
 * the interesting return code is in reply->ret_code */
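/* Attach a local backing device.  Rough sequence: parse the tag list into
 * a new drbd_backing_dev, open the backing and meta-data block devices,
 * run the size/index sanity checks, move to D_ATTACHING, read the meta
 * data, determine the device size and load (or overwrite) the bitmap, and
 * finally transition to the disk state derived from the MDF_* flags
 * (possibly D_NEGOTIATING while connected). */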
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	unsigned int max_bio_size;
	enum drbd_state_rv rv;
	int cp_discovered = 0;
	int logical_block_size;

	drbd_reconfig_start(mdev);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing       = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	if (get_net_conf(mdev)) {
		int prot = mdev->net_conf->wire_protocol;
		put_net_conf(mdev);
		if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  (nbc->dc.meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				      "meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	/* allocate a second IO page if logical_block_size != 512 */
	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	if (logical_block_size != MD_SECTOR_SIZE) {
		if (!mdev->md_io_tmpp) {
			struct page *page = alloc_page(GFP_NOIO);
			if (!page)
				goto force_diskless_dec;

			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
			     logical_block_size, MD_SECTOR_SIZE);
			dev_warn(DEV, "Workaround engaged (has performance impact).\n");

			mdev->md_io_tmpp = page;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_FUA, &mdev->flags);
	else
		clear_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere.  */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	max_bio_size = DRBD_MAX_BIO_SIZE;
	if (mdev->state.conn == C_CONNECTED) {
		/* We are Primary, Connected, and now attach a new local
		 * backing store.  We must not increase the user visible maximum
		 * bio size on this device to something the peer may not be
		 * able to handle. */
		if (mdev->agreed_pro_version < 94)
			max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
		else if (mdev->agreed_pro_version == 94)
			max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
		/* else: drbd 8.3.9 and later, stay with default */
	}

	drbd_setup_queue_param(mdev, max_bio_size);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determin_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		drbd_al_to_on_disk_bm(mdev);
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
	drbd_md_sync(mdev);
 fail:
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}
/* Detaching the disk is a process in multiple stages.  First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then have we finally detached. */
static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	if (mdev->state.disk == D_DISKLESS)
		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
	drbd_resume_io(mdev);
	return 0;
}
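
/* Configure the network side ("drbdsetup net"): parse the tag list into a
 * net_conf prefilled with defaults, validate the combination (protocol C
 * for two primaries, no stonith with protocol A, addresses not already in
 * use by another minor), allocate crypto transforms, digest buffers and
 * hash tables up front, then install everything under req_lock and move
 * to C_UNCONNECTED. */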
static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	int i, ns;
	enum drbd_ret_code retcode;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	struct hlist_head *new_tl_hash = NULL;
	struct hlist_head *new_ee_hash = NULL;
	struct drbd_conf *odev;
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

	drbd_reconfig_start(mdev);

	if (mdev->state.conn > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	new_conf->timeout	   = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int	   = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers	   = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size	   = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size	   = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count	   = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p	   = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p	   = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p	   = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose	   = 0;
	new_conf->two_primaries    = 0;
	new_conf->wire_protocol    = DRBD_PROT_C;
	new_conf->ping_timeo	   = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict	   = DRBD_RR_CONFLICT_DEF;
	new_conf->on_congestion    = DRBD_ON_CONGESTION_DEF;
	new_conf->cong_extents     = DRBD_CONG_EXTENTS_DEF;

	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	if (get_ldev(mdev)) {
		enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
		if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
		retcode = ERR_CONG_NOT_PROTO_A;
		goto fail;
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev)) {
			taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	ns = new_conf->max_epoch_size/8;
	if (mdev->tl_hash_s != ns) {
		new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_tl_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	ns = new_conf->max_buffers/8;
	if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
		new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_ee_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}
  1243. ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
  1244. if (integrity_w_tfm) {
  1245. i = crypto_hash_digestsize(integrity_w_tfm);
  1246. int_dig_out = kmalloc(i, GFP_KERNEL);
  1247. if (!int_dig_out) {
  1248. retcode = ERR_NOMEM;
  1249. goto fail;
  1250. }
  1251. int_dig_in = kmalloc(i, GFP_KERNEL);
  1252. if (!int_dig_in) {
  1253. retcode = ERR_NOMEM;
  1254. goto fail;
  1255. }
  1256. int_dig_vv = kmalloc(i, GFP_KERNEL);
  1257. if (!int_dig_vv) {
  1258. retcode = ERR_NOMEM;
  1259. goto fail;
  1260. }
  1261. }
  1262. if (!mdev->bitmap) {
  1263. if(drbd_bm_init(mdev)) {
  1264. retcode = ERR_NOMEM;
  1265. goto fail;
  1266. }
  1267. }
  1268. drbd_flush_workqueue(mdev);
  1269. spin_lock_irq(&mdev->req_lock);
  1270. if (mdev->net_conf != NULL) {
  1271. retcode = ERR_NET_CONFIGURED;
  1272. spin_unlock_irq(&mdev->req_lock);
  1273. goto fail;
  1274. }
  1275. mdev->net_conf = new_conf;
  1276. mdev->send_cnt = 0;
  1277. mdev->recv_cnt = 0;
  1278. if (new_tl_hash) {
  1279. kfree(mdev->tl_hash);
  1280. mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
  1281. mdev->tl_hash = new_tl_hash;
  1282. }
  1283. if (new_ee_hash) {
  1284. kfree(mdev->ee_hash);
  1285. mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
  1286. mdev->ee_hash = new_ee_hash;
  1287. }
  1288. crypto_free_hash(mdev->cram_hmac_tfm);
  1289. mdev->cram_hmac_tfm = tfm;
  1290. crypto_free_hash(mdev->integrity_w_tfm);
  1291. mdev->integrity_w_tfm = integrity_w_tfm;
  1292. crypto_free_hash(mdev->integrity_r_tfm);
  1293. mdev->integrity_r_tfm = integrity_r_tfm;
  1294. kfree(mdev->int_dig_out);
  1295. kfree(mdev->int_dig_in);
  1296. kfree(mdev->int_dig_vv);
  1297. mdev->int_dig_out=int_dig_out;
  1298. mdev->int_dig_in=int_dig_in;
  1299. mdev->int_dig_vv=int_dig_vv;
  1300. retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
  1301. spin_unlock_irq(&mdev->req_lock);
  1302. kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
  1303. reply->ret_code = retcode;
  1304. drbd_reconfig_done(mdev);
  1305. return 0;
  1306. fail:
  1307. kfree(int_dig_out);
  1308. kfree(int_dig_in);
  1309. kfree(int_dig_vv);
  1310. crypto_free_hash(tfm);
  1311. crypto_free_hash(integrity_w_tfm);
  1312. crypto_free_hash(integrity_r_tfm);
  1313. kfree(new_tl_hash);
  1314. kfree(new_ee_hash);
  1315. kfree(new_conf);
  1316. reply->ret_code = retcode;
  1317. drbd_reconfig_done(mdev);
  1318. return 0;
  1319. }
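/*
 * drbd_nl_disconnect() - Tear down the connection to the peer.
 * With dc.force set we go straight to C_DISCONNECTING; otherwise we
 * negotiate the state change and, if required by the peer or our own
 * state checks, mark one side Outdated first, then wait until the
 * disconnect has actually happened.
 */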
static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;
	struct disconnect dc;

	memset(&dc, 0, sizeof(struct disconnect));
	if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (dc.force) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn >= C_WF_CONNECTION)
			_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
		spin_unlock_irq(&mdev->req_lock);
		goto done;
	}

	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
						       pdsk, D_OUTDATED));
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED),
					      CS_ORDERED);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(mdev->state_wait,
				     mdev->state.conn != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		 * someone else might connect us in the meantime! */
		retcode = ERR_INTR;
		goto fail;
	}

done:
	retcode = NO_ERROR;
fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}

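/* After an online grow, one side has to become SyncSource: if the roles
 * differ, the Primary wins; otherwise the DISCARD_CONCURRENT flag is
 * used as the tie breaker. */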
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}

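/*
 * drbd_nl_resize() - Change the size of the DRBD device.
 * Refused during a resync and while both nodes are Secondary; on
 * success, the new sizes and UUIDs are sent to the peer.
 */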
static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	struct resize rs;
	int retcode = NO_ERROR;
	enum determine_dev_size dd;
	enum dds_flags ddsf;

	memset(&rs, 0, sizeof(struct resize));
	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		put_ldev(mdev); /* don't leak the reference taken above */
		goto fail;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determin_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

fail:
	reply->ret_code = retcode;
	return 0;
}

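/*
 * drbd_nl_syncer_conf() - Validate and apply new syncer settings:
 * checksum/verify algorithms (a change is refused while a resync or
 * verify that uses them is running), CPU mask, sync-after dependency,
 * resync-rate fifo, and activity log size.
 */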
static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;
	int *rs_plan_s = NULL;
	int fifo_size;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate = DRBD_RATE_DEF;
		sc.after = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
		sc.on_no_data = DRBD_ON_NO_DATA_DEF;
		sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
		sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
		sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
		sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
		sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	/* re-sync running */
	rsr = (mdev->state.conn == C_SYNC_SOURCE ||
	       mdev->state.conn == C_SYNC_TARGET ||
	       mdev->state.conn == C_PAUSED_SYNC_S ||
	       mdev->state.conn == C_PAUSED_SYNC_T);

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				     cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	ERR_IF (sc.rate < 1) sc.rate = 1;
	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
	if (sc.al_extents > AL_MAX) {
		dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
		sc.al_extents = AL_MAX;
	}
#undef AL_MAX

	/* to avoid spurious errors when configuring minors before configuring
	 * the minors they depend on: if necessary, first create the minor we
	 * depend on */
	if (sc.after >= 0)
		ensure_mdev(sc.after, 1);

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, sc.after);
	if (retcode != NO_ERROR)
		goto fail;

	fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	/* ok, assign the rest of it as well.
	 * lock against receive_SyncParam() */
	spin_lock(&mdev->peer_seq_lock);
	mdev->sync_conf = sc;

	if (!rsr) {
		crypto_free_hash(mdev->csums_tfm);
		mdev->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}

	if (!ovr) {
		crypto_free_hash(mdev->verify_tfm);
		mdev->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size = fifo_size;
		mdev->rs_planed = 0;
		rs_plan_s = NULL;
	}

	spin_unlock(&mdev->peer_seq_lock);

	if (get_ldev(mdev)) {
		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
		drbd_al_shrink(mdev);
		err = drbd_check_al_size(mdev);
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);

		put_ldev(mdev);
		drbd_md_sync(mdev);

		if (err) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev, &sc);

	if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
		cpumask_copy(mdev->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(mdev);
		mdev->receiver.reset_cpu_mask = 1;
		mdev->asender.reset_cpu_mask = 1;
		mdev->worker.reset_cpu_mask = 1;
	}

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	kfree(rs_plan_s);
	free_cpumask_var(new_cpu_mask);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	reply->ret_code = retcode;
	return 0;
}

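/*
 * drbd_nl_invalidate() - Discard the local data and become sync target.
 * If we are not connected, at least mark the local disk Inconsistent;
 * the retry loop below handles the race with an incoming connection.
 */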
static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

	reply->ret_code = retcode;
	return 0;
}

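/* Helper for invalidate_peer: set all bits in the bitmap, then suspend
 * activity log updates; run via drbd_bitmap_io() below. */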
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				   struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);

	if (retcode < SS_SUCCESS) {
		if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
			/* The peer will get a resync upon connect anyway.
			 * Just make that into a full resync. */
			retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
			if (retcode >= SS_SUCCESS) {
				/* open coded drbd_bitmap_io() */
				if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
						   "set_n_write from invalidate_peer"))
					retcode = ERR_IO_MD_DISK;
			}
		} else
			retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_CLEAR;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));
	return 0;
}

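/*
 * drbd_nl_resume_io() - Resume I/O that was frozen due to loss of disk
 * or connection.  If a new current UUID was deferred while frozen
 * (NEW_CUR_UUID), generate it now; then clear the suspend bits and
 * either retry or fail the requests still in the transfer log.
 */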
static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (reply->ret_code == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev, fail_frozen_disk_io);
	}
	drbd_resume_io(mdev);
	return 0;
}

static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
	return 0;
}

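/*
 * The following "get" handlers do not change any state; they serialize
 * the requested information into reply->tag_list and return the number
 * of bytes used, which the caller adds to the reply length.
 */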
static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
		put_net_conf(mdev);
	}
	tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl = reply->tag_list;
	union drbd_state s = mdev->state;
	unsigned long rs_left;
	unsigned int res;

	tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);

	/* no local ref, no bitmap, no syncer progress. */
	if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
		if (get_ldev(mdev)) {
			drbd_get_syncer_progress(mdev, &rs_left, &res);
			tl = tl_add_int(tl, T_sync_progress, &res);
			put_ldev(mdev);
		}
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
		put_ldev(mdev);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

/**
 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 */
static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				    struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;
	char rv;

	tl = reply->tag_list;

	rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
	     test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;

	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	/* default to resume from last known position, if possible */
	struct start_ov args =
		{ .start_sector = mdev->ov_start_sector };

	if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	/* w_make_ov_request expects position to be aligned;
	 * round down to a multiple of BM_SECT_PER_BIT */
	mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1);
	reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	return 0;
}

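/*
 * drbd_nl_new_c_uuid() - Generate a new current UUID, rotating the old
 * one into the bitmap slot.  With clear_bm set on a freshly created,
 * connected device this skips the initial full sync: the bitmap is
 * cleared and both disks are marked UpToDate.
 */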
static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid args;

	memset(&args, 0, sizeof(struct new_c_uuid));
	if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(&mdev->state_mutex);

	reply->ret_code = retcode;
	return 0;
}

struct cn_handler_struct {
	int (*function)(struct drbd_conf *,
			struct drbd_nl_cfg_req *,
			struct drbd_nl_cfg_reply *);
	int reply_body_size;
};

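/* Dispatch table, one entry per netlink packet type.  reply_body_size
 * is the worst-case size of the tag list a handler may produce; it is
 * added to the reply allocation in drbd_connector_callback(). */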
static struct cn_handler_struct cnd_table[] = {
	[ P_primary ]		= { &drbd_nl_primary,		0 },
	[ P_secondary ]		= { &drbd_nl_secondary,		0 },
	[ P_disk_conf ]		= { &drbd_nl_disk_conf,		0 },
	[ P_detach ]		= { &drbd_nl_detach,		0 },
	[ P_net_conf ]		= { &drbd_nl_net_conf,		0 },
	[ P_disconnect ]	= { &drbd_nl_disconnect,	0 },
	[ P_resize ]		= { &drbd_nl_resize,		0 },
	[ P_syncer_conf ]	= { &drbd_nl_syncer_conf,	0 },
	[ P_invalidate ]	= { &drbd_nl_invalidate,	0 },
	[ P_invalidate_peer ]	= { &drbd_nl_invalidate_peer,	0 },
	[ P_pause_sync ]	= { &drbd_nl_pause_sync,	0 },
	[ P_resume_sync ]	= { &drbd_nl_resume_sync,	0 },
	[ P_suspend_io ]	= { &drbd_nl_suspend_io,	0 },
	[ P_resume_io ]		= { &drbd_nl_resume_io,		0 },
	[ P_outdate ]		= { &drbd_nl_outdate,		0 },
	[ P_get_config ]	= { &drbd_nl_get_config,
				    sizeof(struct syncer_conf_tag_len_struct) +
				    sizeof(struct disk_conf_tag_len_struct) +
				    sizeof(struct net_conf_tag_len_struct) },
	[ P_get_state ]		= { &drbd_nl_get_state,
				    sizeof(struct get_state_tag_len_struct) +
				    sizeof(struct sync_progress_tag_len_struct) },
	[ P_get_uuids ]		= { &drbd_nl_get_uuids,
				    sizeof(struct get_uuids_tag_len_struct) },
	[ P_get_timeout_flag ]	= { &drbd_nl_get_timeout_flag,
				    sizeof(struct get_timeout_flag_tag_len_struct) },
	[ P_start_ov ]		= { &drbd_nl_start_ov,		0 },
	[ P_new_c_uuid ]	= { &drbd_nl_new_c_uuid,	0 },
};

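/*
 * drbd_connector_callback() - Entry point for all netlink/connector
 * requests from user space.  Checks CAP_SYS_ADMIN, looks up (or
 * creates) the addressed minor, dispatches to the handler from
 * cnd_table, and sends the assembled reply via cn_netlink_send().
 */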
static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
{
	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
	struct cn_handler_struct *cm;
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct drbd_conf *mdev;
	int retcode, rr;
	int reply_size = sizeof(struct cn_msg)
		+ sizeof(struct drbd_nl_cfg_reply)
		+ sizeof(short int);

	if (!try_module_get(THIS_MODULE)) {
		printk(KERN_ERR "drbd: try_module_get() failed!\n");
		return;
	}

	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
		retcode = ERR_PERM;
		goto fail;
	}

	mdev = ensure_mdev(nlp->drbd_minor,
			(nlp->flags & DRBD_NL_CREATE_DEVICE));
	if (!mdev) {
		retcode = ERR_MINOR_INVALID;
		goto fail;
	}

	if (nlp->packet_type >= P_nl_after_last_packet ||
	    nlp->packet_type == P_return_code_only) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	cm = cnd_table + nlp->packet_type;

	/* This may happen if packet number is 0: */
	if (cm->function == NULL) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	reply_size += cm->reply_body_size;

	/* allocation not in the IO path, cqueue thread context */
	cn_reply = kzalloc(reply_size, GFP_KERNEL);
	if (!cn_reply) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;

	reply->packet_type =
		cm->reply_body_size ? nlp->packet_type : P_return_code_only;
	reply->minor = nlp->drbd_minor;
	reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
	/* reply->tag_list; might be modified by cm->function. */

	rr = cm->function(mdev, nlp, reply);

	cn_reply->id = req->id;
	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
	cn_reply->flags = 0;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);

	kfree(cn_reply);
	module_put(THIS_MODULE);
	return;
fail:
	drbd_nl_send_reply(req, retcode);
	module_put(THIS_MODULE);
}

static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */

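/* Each tag list entry is encoded as a 16-bit tag, a 16-bit length, then
 * "length" bytes of payload.  len is clamped to the tag's declared
 * max_len; with nul_terminated set, the last payload byte is forced to
 * '\0' so strings stay terminated even after truncation. */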
static unsigned short *
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
	unsigned short len, int nul_terminated)
{
	unsigned short l = tag_descriptions[tag_number(tag)].max_len;

	len = (len < l) ? len : l;
	put_unaligned(tag, tl++);
	put_unaligned(len, tl++);
	memcpy(tl, data, len);
	tl = (unsigned short *)((char *)tl + len);
	if (nul_terminated)
		*((char *)tl - 1) = 0;
	return tl;
}

static unsigned short *
tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
{
	return __tl_add_blob(tl, tag, data, len, 0);
}

static unsigned short *
tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
{
	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
}

static unsigned short *
tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
{
	put_unaligned(tag, tl++);

	switch (tag_type(tag)) {
	case TT_INTEGER:
		put_unaligned(sizeof(int), tl++);
		put_unaligned(*(int *)val, (int *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(int));
		break;
	case TT_INT64:
		put_unaligned(sizeof(u64), tl++);
		put_unaligned(*(u64 *)val, (u64 *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(u64));
		break;
	default:
		/* someone did something stupid. */
		;
	}

	return tl;
}

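/* The drbd_bcast_*() functions below broadcast unsolicited events to
 * user space (e.g. for "drbdsetup events") over the same connector
 * channel.  No request is involved, so seq comes from drbd_nl_seq and
 * ack stays 0. */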
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct get_state_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */

	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_get_state;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct call_helper_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */

	tl = tl_add_str(tl, T_helper, helper_name);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_call_helper;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

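/* drbd_bcast_ee() - Broadcast an epoch entry to user space: the reason,
 * the seen and calculated digests, the sector and block id, and up to
 * the first 32k of the data itself. */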
void drbd_bcast_ee(struct drbd_conf *mdev,
		const char *reason, const int dgs,
		const char *seen_hash, const char *calc_hash,
		const struct drbd_epoch_entry *e)
{
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	unsigned short *tl;
	struct page *page;
	unsigned len;

	if (!e)
		return;
	if (!reason || !reason[0])
		return;

	/* apparently we have to memcpy twice, first to prepare the data for the
	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
	 * netlink skb. */
	/* receiver thread context, which is not in the writeout path (of this node),
	 * but may be in the writeout path of the _other_ node.
	 * GFP_NOIO to avoid potential "distributed deadlock". */
	cn_reply = kzalloc(
		sizeof(struct cn_msg)+
		sizeof(struct drbd_nl_cfg_reply)+
		sizeof(struct dump_ee_tag_len_struct)+
		sizeof(short int),
		GFP_NOIO);

	if (!cn_reply) {
		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
			(unsigned long long)e->sector, e->size);
		return;
	}

	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
	tl = reply->tag_list;

	tl = tl_add_str(tl, T_dump_ee_reason, reason);
	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
	tl = tl_add_int(tl, T_ee_sector, &e->sector);
	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);

	/* dump the first 32k */
	len = min_t(unsigned, e->size, 32 << 10);
	put_unaligned(T_ee_data, tl++);
	put_unaligned(len, tl++);

	page = e->pages;
	page_chain_for_each(page) {
		void *d = kmap_atomic(page, KM_USER0);
		unsigned l = min_t(unsigned, len, PAGE_SIZE);
		memcpy(tl, d, l);
		kunmap_atomic(d, KM_USER0);
		tl = (unsigned short *)((char *)tl + l);
		len -= l;
		if (len == 0)
			break;
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_dump_ee;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	kfree(cn_reply);
}

void drbd_bcast_sync_progress(struct drbd_conf *mdev)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct sync_progress_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;
	unsigned long rs_left;
	unsigned int res;

	/* no local ref, no bitmap, no syncer progress, no broadcast. */
	if (!get_ldev(mdev))
		return;
	drbd_get_syncer_progress(mdev, &rs_left, &res);
	put_ldev(mdev);

	tl = tl_add_int(tl, T_sync_progress, &res);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_sync_progress;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

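/* Module init/cleanup for the netlink interface: register the connector
 * callback under (cn_idx, CN_VAL_DRBD), retrying with a stepped-up
 * cn_idx if the id is already taken. */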
int __init drbd_nl_init(void)
{
	static struct cb_id cn_id_drbd;
	int err, try = 10;

	cn_id_drbd.val = CN_VAL_DRBD;
	do {
		cn_id_drbd.idx = cn_idx;
		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
		if (!err)
			break;
		cn_idx += CN_IDX_STEP;
	} while (try--);

	if (err) {
		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
		return err;
	}

	return 0;
}

void drbd_nl_cleanup(void)
{
	static struct cb_id cn_id_drbd;

	cn_id_drbd.idx = cn_idx;
	cn_id_drbd.val = CN_VAL_DRBD;

	cn_del_callback(&cn_id_drbd);
}

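/* Send a bare return-code-only reply; used on the error paths of
 * drbd_connector_callback() where no handler-specific reply exists. */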
void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
{
	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	int rr;

	memset(buffer, 0, sizeof(buffer));
	cn_reply->id = req->id;

	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
	cn_reply->flags = 0;

	reply->packet_type = P_return_code_only;
	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
	reply->ret_code = ret_code;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
}