/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>

/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *conn_name;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail is lack of space in the skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}
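
/* For illustration: a reply carrying drbd_msg_put_info("unknown minor")
 * nests the text as a netlink attribute, roughly
 *
 *	DRBD_NLA_CFG_REPLY
 *		T_info_text: "unknown minor"
 *
 * so user space can print a human readable reason next to the numeric
 * ret_code in the reply header. */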
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_CONN	2
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS &&
	    security_netlink_recv(skb, CAP_SYS_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb)
		goto fail;

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* a put of a few bytes into a fresh skb of >= 4k will always succeed;
	 * check anyway */
	if (!adm_ctx.reply_dh)
		goto fail;

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
		nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
		if (nla)
			adm_ctx.conn_name = nla_data(nla);
	} else
		adm_ctx.volume = VOLUME_UNSPECIFIED;

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.conn_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
		drbd_msg_put_info("unknown connection");
		return ERR_INVALID_REQUEST;
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
			   adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different connection");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			   adm_ctx.minor, adm_ctx.volume,
			   adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return -ENOMEM;
}
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	struct nlattr *nla;
	const char *conn_name = NULL;

	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;

	nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
	if (nla) {
		nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
		if (nla)
			conn_name = nla_data(nla);
	}

	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}
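
/* Sketch of the calling convention shared by the .doit handlers below
 * (illustrative only; "drbd_adm_frobnicate" is a made-up name, the flags
 * and helpers are the real ones from this file):
 *
 *	int drbd_adm_frobnicate(struct sk_buff *skb, struct genl_info *info)
 *	{
 *		enum drbd_ret_code retcode;
 *
 *		retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
 *		if (!adm_ctx.reply_skb)
 *			return retcode;
 *		if (retcode != NO_ERROR)
 *			goto out;
 *		... do the actual work on adm_ctx.mdev ...
 *	out:
 *		drbd_adm_finish(info, retcode);
 *		return 0;
 *	}
 */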
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (nc) {
		switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
		}
		snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
	}
	rcu_read_unlock();
}
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(mdev->tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
			 usermode_helper, cmd, mb,
			 (ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
			 usermode_helper, cmd, mb,
			 (ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
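
/* For illustration (assuming the default usermode_helper, /sbin/drbdadm,
 * not overridden via the module parameter): on minor 0 of a connection
 * whose peer is 10.0.0.2, drbd_khelper(mdev, "fence-peer") runs roughly
 *
 *	/sbin/drbdadm fence-peer minor-0
 *
 * with DRBD_PEER_AF=ipv4 and DRBD_PEER_ADDRESS=10.0.0.2 in the
 * environment, as filled in by setup_khelper_env() above. */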
static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	down_read(&drbd_cfg_rwsem);
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		drbd_md_sync(mdev);
	up_read(&drbd_cfg_rwsem);
}
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp, mdev->ldev->dc.fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:
	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS)
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->net_conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->want_lose = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->net_conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			      mdev->state.pdsk <= D_FAILED)
			     && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}
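
/* A note on the mask/val idiom used above (explanatory, derived from the
 * code in this file): a state change request names the fields it wants to
 * touch in 'mask' and their target values in 'val'.  E.g. to request only
 * a role change:
 *
 *	mask.i = 0; mask.role = R_MASK;
 *	val.i  = 0; val.role  = R_PRIMARY;
 *
 * All union drbd_state fields not set in 'mask' keep their current value;
 * the retry loop above widens the request (adding disk/pdsk bits) as it
 * learns why the previous attempt was refused. */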
static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}
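
/* Rough sketch of the internal meta data layout implied by the offsets
 * above (al_offset and bm_offset are sector offsets relative to md_offset;
 * for internal meta data, md_offset points at the super block near the end
 * of the backing device):
 *
 *	| ... usable data ... | bitmap | activity log | md super block |
 *
 * i.e. the bitmap starts at md_offset + bm_offset, the AL at
 * md_offset + al_offset, the super block at md_offset itself.  For the
 * fixed-size indexed variants the order is the other way around: super
 * block at md_offset, AL at md_offset + MD_AL_OFFSET, bitmap at
 * md_offset + MD_BM_OFFSET. */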
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);
	return buf;
}
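
/* Worked example: size = 1048576 (1 GiB expressed in KB) is >= 10000, so
 * one iteration shifts it to 1024 with base = 1 ('M'); 1024 < 10000 ends
 * the loop and the result is "1024 MB".  The !!(size & (1<<9)) term rounds
 * to nearest instead of truncating at each shift. */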
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
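
/* Usage pattern (as seen in drbd_determine_dev_size() and drbd_adm_attach()
 * below): the pair brackets a critical section that must not race with
 * application IO, e.g.
 *
 *	drbd_suspend_io(mdev);
 *	... resize bitmap / move meta data / change queue limits ...
 *	drbd_resume_io(mdev);
 *
 * drbd_suspend_io() returns immediately when IO is already frozen by the
 * state machine (drbd_suspended()), to avoid waiting for requests that
 * cannot complete anyway. */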
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			 (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
				(unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
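
/* Worked example (values in 512-byte sectors): with an 80 GiB local disk
 * (m_size = 167772160) and a connected peer reporting 100 GiB
 * (p_size = 209715200), the agreed size is min(p_size, m_size), i.e. the
 * full 80 GiB.  Disconnected and without assume_peer_has_space, p_size is
 * 0, so the last agreed size (la_size) wins, further clipped by m_size.
 * A non-zero user requested size (u_size) can only ever shrink the result. */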
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (!expect(dc->al_extents >= DRBD_AL_EXTENTS_MIN))
		dc->al_extents = DRBD_AL_EXTENTS_MIN;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
					e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		max_segments = mdev->ldev->dc.max_bio_bvecs;
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}
	/* We may ignore peer limits if the peer is modern enough.
	 * From 8.3.8 onwards the peer can use multiple
	 * BIOs for a single peer_request. */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = mdev->peer_max_bio_size;
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
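
/* For illustration: against a peer speaking exactly protocol 94, 'peer' is
 * clamped to DRBD_MAX_SIZE_H80_PACKET (the old packet header limits data
 * to 32 KiB); against an 8.3.8+ peer it may grow to DRBD_MAX_BIO_SIZE.
 * The effective limit is always min(local, peer), so attaching a backing
 * device with a smaller queue_max_hw_sectors() shrinks it further. */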
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	spin_lock_irq(&tconn->req_lock);
	if (conn_all_vols_unconf(tconn))
		drbd_thread_stop_nowait(&tconn->worker);
	spin_unlock_irq(&tconn->req_lock);
}
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *ndc; /* new disk conf */
	int err, fifo_size;
	int *rs_plan_s = NULL;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* FIXME freeze IO, cluster wide.
	 *
	 * We should make sure no-one uses
	 * some half-updated struct when we
	 * assign it later. */

	ndc = kmalloc(sizeof(*ndc), GFP_KERNEL);
	if (!ndc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	memcpy(ndc, &mdev->ldev->dc, sizeof(*ndc));
	err = disk_conf_from_attrs_for_change(ndc, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
	}

	if (!expect(ndc->resync_rate >= 1))
		ndc->resync_rate = 1;

	/* clip to allowed range */
	if (!expect(ndc->al_extents >= DRBD_AL_EXTENTS_MIN))
		ndc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (!expect(ndc->al_extents <= DRBD_AL_EXTENTS_MAX))
		ndc->al_extents = DRBD_AL_EXTENTS_MAX;

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, ndc->resync_after);
	if (retcode != NO_ERROR)
		goto fail;

	fifo_size = (ndc->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size = fifo_size;
		mdev->rs_planed = 0;
		rs_plan_s = NULL;
	}

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, ndc);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* FIXME
	 * To avoid someone looking at a half-updated struct, we probably
	 * should have a rw-semaphor on net_conf and disk_conf.
	 */
	mdev->ldev->dc = *ndc;

	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

 fail:
	put_ldev(mdev);
	kfree(ndc);
	kfree(rs_plan_s);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
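
/* Sizing note (assuming SLEEP_TIME is HZ/10, i.e. the resync worker ticks
 * every 0.1 s, as elsewhere in DRBD): c_plan_ahead is configured in units
 * of 0.1 s, so
 *
 *	fifo_size = (c_plan_ahead * 10 * SLEEP_TIME) / HZ
 *
 * works out to one fifo slot per worker tick over the planning horizon;
 * e.g. c_plan_ahead = 20 (2 s) yields a 20-entry resync plan. */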
  1014. int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
  1015. {
  1016. struct drbd_conf *mdev;
  1017. int err;
  1018. enum drbd_ret_code retcode;
  1019. enum determine_dev_size dd;
  1020. sector_t max_possible_sectors;
  1021. sector_t min_md_device_sectors;
  1022. struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
  1023. struct block_device *bdev;
  1024. struct lru_cache *resync_lru = NULL;
  1025. union drbd_state ns, os;
  1026. enum drbd_state_rv rv;
  1027. struct net_conf *nc;
  1028. int cp_discovered = 0;
  1029. retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
  1030. if (!adm_ctx.reply_skb)
  1031. return retcode;
  1032. if (retcode != NO_ERROR)
  1033. goto finish;
  1034. mdev = adm_ctx.mdev;
  1035. conn_reconfig_start(mdev->tconn);
  1036. /* if you want to reconfigure, please tear down first */
  1037. if (mdev->state.disk > D_DISKLESS) {
  1038. retcode = ERR_DISK_CONFIGURED;
  1039. goto fail;
  1040. }
  1041. /* It may just now have detached because of IO error. Make sure
  1042. * drbd_ldev_destroy is done already, we may end up here very fast,
  1043. * e.g. if someone calls attach from the on-io-error handler,
  1044. * to realize a "hot spare" feature (not that I'd recommend that) */
  1045. wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
  1046. /* allocation not in the IO path, drbdsetup context */
  1047. nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
  1048. if (!nbc) {
  1049. retcode = ERR_NOMEM;
  1050. goto fail;
  1051. }
  1052. nbc->dc = (struct disk_conf) {
  1053. {}, 0, /* backing_dev */
  1054. {}, 0, /* meta_dev */
  1055. 0, /* meta_dev_idx */
  1056. DRBD_DISK_SIZE_SECT_DEF, /* disk_size */
  1057. DRBD_MAX_BIO_BVECS_DEF, /* max_bio_bvecs */
  1058. DRBD_ON_IO_ERROR_DEF, /* on_io_error */
  1059. DRBD_FENCING_DEF, /* fencing */
  1060. DRBD_RATE_DEF, /* resync_rate */
  1061. DRBD_AFTER_DEF, /* resync_after */
  1062. DRBD_AL_EXTENTS_DEF, /* al_extents */
  1063. DRBD_C_PLAN_AHEAD_DEF, /* c_plan_ahead */
  1064. DRBD_C_DELAY_TARGET_DEF, /* c_delay_target */
  1065. DRBD_C_FILL_TARGET_DEF, /* c_fill_target */
  1066. DRBD_C_MAX_RATE_DEF, /* c_max_rate */
  1067. DRBD_C_MIN_RATE_DEF, /* c_min_rate */
  1068. 0, /* no_disk_barrier */
  1069. 0, /* no_disk_flush */
  1070. 0, /* no_disk_drain */
  1071. 0, /* no_md_flush */
  1072. };
  1073. err = disk_conf_from_attrs(&nbc->dc, info);
  1074. if (err) {
  1075. retcode = ERR_MANDATORY_TAG;
  1076. drbd_msg_put_info(from_attrs_err_to_txt(err));
  1077. goto fail;
  1078. }
  1079. if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
  1080. retcode = ERR_MD_IDX_INVALID;
  1081. goto fail;
  1082. }
  1083. rcu_read_lock();
  1084. nc = rcu_dereference(mdev->tconn->net_conf);
  1085. if (nc) {
  1086. if (nbc->dc.fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
  1087. rcu_read_unlock();
  1088. retcode = ERR_STONITH_AND_PROT_A;
  1089. goto fail;
  1090. }
  1091. }
  1092. rcu_read_unlock();
  1093. bdev = blkdev_get_by_path(nbc->dc.backing_dev,
  1094. FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
  1095. if (IS_ERR(bdev)) {
  1096. dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
  1097. PTR_ERR(bdev));
  1098. retcode = ERR_OPEN_DISK;
  1099. goto fail;
  1100. }
  1101. nbc->backing_bdev = bdev;
  1102. /*
  1103. * meta_dev_idx >= 0: external fixed size, possibly multiple
  1104. * drbd sharing one meta device. TODO in that case, paranoia
  1105. * check that [md_bdev, meta_dev_idx] is not yet used by some
  1106. * other drbd minor! (if you use drbd.conf + drbdadm, that
  1107. * should check it for you already; but if you don't, or
  1108. * someone fooled it, we need to double check here)
  1109. */
  1110. bdev = blkdev_get_by_path(nbc->dc.meta_dev,
  1111. FMODE_READ | FMODE_WRITE | FMODE_EXCL,
  1112. ((int)nbc->dc.meta_dev_idx < 0) ?
  1113. (void *)mdev : (void *)drbd_m_holder);
  1114. if (IS_ERR(bdev)) {
  1115. dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
  1116. PTR_ERR(bdev));
  1117. retcode = ERR_OPEN_MD_DISK;
  1118. goto fail;
  1119. }
  1120. nbc->md_bdev = bdev;
  1121. if ((nbc->backing_bdev == nbc->md_bdev) !=
  1122. (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
  1123. nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
  1124. retcode = ERR_MD_IDX_INVALID;
  1125. goto fail;
  1126. }
	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			       1, 61, sizeof(struct bm_extent),
			       offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* Needed by drbd_get_max_capacity() below,
	 * in particular for DRBD_MD_INDEX_FLEX_INT. */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	if ((int)nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			 "to currently maximum possible %llu sectors <==\n",
			 (unsigned long long) max_possible_sectors);
		if ((int)nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				 "meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
			(unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev, &nbc->dc)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_FUA, &mdev->flags);
	else
		clear_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * Now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_reconsider_max_bio_size(mdev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	    drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determine_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
			 "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
				   "set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
				   "read from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		if (drbd_bitmap_io(mdev, &drbd_bm_write,
				   "crashed primary apply AL", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->tconn->req_lock);
	os = drbd_read_state(mdev);
	ns = os;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WAS_UP_TO_DATE...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED, postpone any decision on the new disk
	   state until after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock, i.e. atomically with the state change. */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	conn_reconfig_done(mdev->tconn);
	drbd_adm_finish(info, retcode);
	return 0;

force_diskless_dec:
	put_ldev(mdev);
force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
	drbd_md_sync(mdev);
fail:
	conn_reconfig_done(mdev->tconn);
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

finish:
	drbd_adm_finish(info, retcode);
	return 0;
}
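
/* Request the transition to D_DISKLESS, then wait until either no
 * internal references to the local disk (local_cnt) remain, or the
 * disk state shows that the request did not go through.  Returns an
 * enum drbd_state_rv, squeezed through the int return type. */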
static int adm_detach(struct drbd_conf *mdev)
{
	enum drbd_state_rv retcode;

	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	wait_event(mdev->misc_wait,
		   mdev->state.disk != D_DISKLESS ||
		   !atomic_read(&mdev->local_cnt));
	drbd_resume_io(mdev);
	return retcode;
}

/* Detaching the disk is a process in multiple stages.  First we need to lock
 * out application IO, in-flight IO, and IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then have we finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_detach(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
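
/* conn_resync_running() / conn_ov_running(): scan all volumes of a
 * connection and report whether any of them is currently resyncing or
 * running an online verify.  Used below to refuse changing csums-alg
 * or verify-alg while the corresponding operation is in progress. */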
static bool conn_resync_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_SYNC_SOURCE ||
		    mdev->state.conn == C_SYNC_TARGET ||
		    mdev->state.conn == C_PAUSED_SYNC_S ||
		    mdev->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

static bool conn_ov_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_VERIFY_S ||
		    mdev->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
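
/* Validate a new net_conf against the current connection state:
 * changing the wire protocol while talking to a peer needs at least
 * agreed protocol version 100, two-primaries requires protocol C,
 * fencing by STONITH is incompatible with the asynchronous protocol A,
 * "discard my data" makes no sense on a current Primary, and the
 * on-congestion policies other than "block" only work with protocol A. */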
static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
{
	struct drbd_conf *mdev;
	int i;

	if (old_conf && tconn->agreed_pro_version < 100 &&
	    tconn->cstate == C_WF_REPORT_PARAMS &&
	    new_conf->wire_protocol != old_conf->wire_protocol)
		return ERR_NEED_APV_100;

	if (new_conf->two_primaries &&
	    (new_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (get_ldev(mdev)) {
			enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
			put_ldev(mdev);
			if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (mdev->state.role == R_PRIMARY && new_conf->want_lose)
			return ERR_DISCARD;
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}

static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
	enum drbd_ret_code rv;
	struct drbd_conf *mdev;
	int i;

	rcu_read_lock();
	rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
	rcu_read_unlock();

	/* tconn->volumes protected by genl_lock() here */
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (!mdev->bitmap) {
			if (drbd_bm_init(mdev))
				return ERR_NOMEM;
		}
	}

	return rv;
}
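
/* Change per-connection network options at runtime (as triggered by,
 * for example, "drbdsetup net-options" in current userland; the exact
 * command name depends on the drbd-utils version in use).  The new
 * net_conf replaces the old one via RCU, so readers in the data path
 * never see a half-updated configuration. */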
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct net_conf *old_conf, *new_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;

	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	conn_reconfig_start(tconn);

	mutex_lock(&tconn->net_conf_update);
	old_conf = tconn->net_conf;

	if (!old_conf) {
		drbd_msg_put_info("net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto fail;
	}

	*new_conf = *old_conf;

	err = net_conf_from_attrs_for_change(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* The csums algorithm must not change while a resync is running. */
	rsr = conn_resync_running(tconn);
	if (rsr && old_conf && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && new_conf->csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(new_conf->csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* The verify algorithm must not change while an online verify is running. */
	ovr = conn_ov_running(tconn);
	if (ovr) {
		if (strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && new_conf->verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(new_conf->verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	rcu_assign_pointer(tconn->net_conf, new_conf);

	if (!rsr) {
		crypto_free_hash(tconn->csums_tfm);
		tconn->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_hash(tconn->verify_tfm);
		tconn->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}

	mutex_unlock(&tconn->net_conf_update);
	synchronize_rcu();
	kfree(old_conf);

	if (tconn->cstate >= C_WF_REPORT_PARAMS)
		drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

	goto done;

fail:
	mutex_unlock(&tconn->net_conf_update);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	kfree(new_conf);
done:
	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
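
/* Establish the network configuration of a so far standalone connection,
 * then request C_UNCONNECTED, which starts up the receiver.  Crypto
 * transforms (CRAM-HMAC for peer authentication, plus the optional data
 * integrity hashes) and digest buffers are allocated up front, so the
 * final assignment under net_conf_update cannot fail halfway. */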
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	struct drbd_conf *mdev;
	struct net_conf *old_conf, *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct drbd_tconn *oconn;
	struct drbd_tconn *tconn;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	*new_conf = (struct net_conf) {
		{}, 0, /* my_addr */
		{}, 0, /* peer_addr */
		{}, 0, /* shared_secret */
		{}, 0, /* cram_hmac_alg */
		{}, 0, /* integrity_alg */
		{}, 0, /* verify_alg */
		{}, 0, /* csums_alg */
		DRBD_PROTOCOL_DEF, /* wire_protocol */
		DRBD_CONNECT_INT_DEF, /* try_connect_int */
		DRBD_TIMEOUT_DEF, /* timeout */
		DRBD_PING_INT_DEF, /* ping_int */
		DRBD_PING_TIMEO_DEF, /* ping_timeo */
		DRBD_SNDBUF_SIZE_DEF, /* sndbuf_size */
		DRBD_RCVBUF_SIZE_DEF, /* rcvbuf_size */
		DRBD_KO_COUNT_DEF, /* ko_count */
		DRBD_MAX_BUFFERS_DEF, /* max_buffers */
		DRBD_MAX_EPOCH_SIZE_DEF, /* max_epoch_size */
		DRBD_UNPLUG_WATERMARK_DEF, /* unplug_watermark */
		DRBD_AFTER_SB_0P_DEF, /* after_sb_0p */
		DRBD_AFTER_SB_1P_DEF, /* after_sb_1p */
		DRBD_AFTER_SB_2P_DEF, /* after_sb_2p */
		DRBD_RR_CONFLICT_DEF, /* rr_conflict */
		DRBD_ON_CONGESTION_DEF, /* on_congestion */
		DRBD_CONG_FILL_DEF, /* cong_fill */
		DRBD_CONG_EXTENTS_DEF, /* cong_extents */
		0, /* two_primaries */
		0, /* want_lose */
		0, /* no_cork */
		0, /* always_asbp */
		0, /* dry_run */
		0, /* use_rle */
	};

	err = net_conf_from_attrs(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;

	/* No need to take drbd_cfg_rwsem here.  All reconfiguration is
	 * strictly serialized on genl_lock().  We are protected against
	 * concurrent reconfiguration/addition/deletion. */
	list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
		struct net_conf *nc;
		if (oconn == tconn)
			continue;

		rcu_read_lock();
		nc = rcu_dereference(oconn->net_conf);
		if (nc) {
			taken_addr = (struct sockaddr *)&nc->my_addr;
			if (new_conf->my_addr_len == nc->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&nc->peer_addr;
			if (new_conf->peer_addr_len == nc->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;
		}
		rcu_read_unlock();
		if (retcode != NO_ERROR)
			goto fail;
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	/* allocation not in the IO path, cqueue thread context */
	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	conn_flush_workqueue(tconn);

	mutex_lock(&tconn->net_conf_update);
	old_conf = tconn->net_conf;
	if (old_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&tconn->net_conf_update);
		goto fail;
	}
	rcu_assign_pointer(tconn->net_conf, new_conf);

	conn_free_crypto(tconn);
	tconn->cram_hmac_tfm = tfm;
	tconn->integrity_w_tfm = integrity_w_tfm;
	tconn->integrity_r_tfm = integrity_r_tfm;
	tconn->int_dig_in = int_dig_in;
	tconn->int_dig_vv = int_dig_vv;
	mutex_unlock(&tconn->net_conf_update);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
	}
	rcu_read_unlock();

	retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return 0;

fail:
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_conf);

	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
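
/* Try to take the connection down gracefully: a plain request for
 * C_DISCONNECTING may be refused by the state engine, in which case we
 * retry with the peer's disk (or our own) marked D_OUTDATED, as the
 * cluster-wide state checks demand.  With force set, we bypass all of
 * that and go to C_DISCONNECTING with CS_HARD. */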
static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	if (force) {
		spin_lock_irq(&tconn->req_lock);
		rv = _conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		spin_unlock_irq(&tconn->req_lock);
		return rv;
	}

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						   pdsk, D_OUTDATED), CS_VERBOSE);
		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						   disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
			rv = SS_SUCCESS;
		}
		break;
	default:
		/* no special handling necessary */
		break;
	}

	return rv;
}

int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS) {
		retcode = rv;  /* enum type mismatch!  Without this, a failed
				* disconnect would be reported as success. */
		goto fail;
	}

	/* No one else can reconfigure the network while I am here.
	 * The state handling only uses drbd_thread_stop_nowait(),
	 * we want to really wait here until the receiver is no more. */
	drbd_thread_stop(&tconn->receiver);
	if (wait_event_interruptible(tconn->ping_wait,
				     tconn->cstate == C_STANDALONE)) {
		retcode = ERR_INTR;
		goto fail;
	}

	retcode = NO_ERROR;
fail:
	drbd_adm_finish(info, retcode);
	return 0;
}

void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
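
/* Resize the DRBD device, either to the current capacity of the
 * lower-level device or to an explicitly requested size.  Resizing is
 * refused during a resync and when both nodes are Secondary; growing
 * without a subsequent resync requires at least agreed protocol
 * version 93. */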
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

fail:
	drbd_adm_finish(info, retcode);
	return 0;

fail_ldev:
	/* don't leak the ldev reference taken above */
	put_ldev(mdev);
	goto fail;
}
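
/* Update per-resource options.  Currently that is the on-no-data-accessible
 * policy and the CPU mask the DRBD kernel threads of this resource are
 * allowed to run on; an updated CPU mask is propagated to the receiver,
 * asender and worker threads. */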
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	cpumask_var_t new_cpu_mask;
	struct drbd_tconn *tconn;
	int *rs_plan_s = NULL;
	struct res_opts sc;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		drbd_msg_put_info("unable to allocate cpumask");
		goto fail;
	}

	if (((struct drbd_genlmsghdr *)info->userhdr)->flags &
	    DRBD_GENL_F_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct res_opts));
		sc.on_no_data = DRBD_ON_NO_DATA_DEF;
	} else
		sc = tconn->res_opts;

	err = res_opts_from_attrs(&sc, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				     cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	tconn->res_opts = sc;

	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(tconn);
		tconn->receiver.reset_cpu_mask = 1;
		tconn->asender.reset_cpu_mask = 1;
		tconn->worker.reset_cpu_mask = 1;
	}

fail:
	kfree(rs_plan_s);
	free_cpumask_var(new_cpu_mask);

	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->tconn->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

/* Set all bits in the bitmap and write it out, then suspend activity
 * log updates: with everything out of sync anyway, per-extent AL
 * bookkeeping only adds overhead. */
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
					 union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
}

int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
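
/* Helpers to fill status replies and broadcast events.  Netlink
 * attributes are grouped into nested containers (DRBD_NLA_CFG_CONTEXT,
 * DRBD_NLA_STATE_INFO, ...); if the skb runs out of room, the
 * half-written nest is cancelled and -EMSGSIZE is returned to the
 * caller. */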
int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED)
		NLA_PUT_U32(skb, T_ctx_volume, vnr);
	NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}

int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
			const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct net_conf *nc;
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process.  Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process. */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	if (got_ldev)
		if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
			goto nla_put_failure;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
	NLA_PUT_U32(skb, T_current_state, mdev->state.i);
	NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
	NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));

	if (got_ldev) {
		NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
		NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
		NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
			NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			NLA_PUT_U32(skb, T_prev_state, sib->os.i);
			NLA_PUT_U32(skb, T_new_state, sib->ns.i);
			break;
		case SIB_HELPER_POST:
			NLA_PUT_U32(skb,
				    T_helper_exit_code, sib->helper_exit_code);
			/* fall through */
		case SIB_HELPER_PRE:
			NLA_PUT_STRING(skb, T_helper, sib->helper_name);
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn *)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0]
	 * and i is cb->args[1].
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	down_read(&drbd_cfg_rwsem);
	/* revalidate iterator position */
	list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry(tconn->all_tconn.next,
					 struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				 cb->nlh->nlmsg_seq, &drbd_genl_family,
				 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* This is a tconn without a single volume. */
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
				genlmsg_cancel(skb, dh);
			else
				genlmsg_end(skb, dh);
			goto out;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
			genlmsg_cancel(skb, dh);
			goto out;
		}
		genlmsg_end(skb, dh);
	}

out:
	up_read(&drbd_cfg_rwsem);
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}

/*
 * Request status of all resources, or of all volumes within a single
 * resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb
 * otherwise.  Which means we cannot use the family->attrbuf or other
 * such members, because dump is NOT protected by the genl_lock().
 * During dump, we only have access to the incoming skb, and need to
 * opencode "parsing" of the nlattr payload.
 *
 * Once things are set up properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *conn_name;
	struct drbd_tconn *tconn;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
		       nlmsg_attrlen(cb->nlh, hdrlen),
		       DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	conn_name = nla_data(nla);
	tconn = conn_get_by_name(conn_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}

int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		/* resume from last known position, if possible */
		struct start_ov_parms parms =
			{ .ov_start_sector = mdev->ov_start_sector };
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
		/* w_make_ov_request expects the start position to be aligned
		 * to a bitmap-bit boundary; "& ~BM_SECT_PER_BIT" would only
		 * clear a single bit, not round down. */
		mdev->ov_start_sector =
			parms.ov_start_sector & ~(sector_t)(BM_SECT_PER_BIT-1);
	}

	/* If there is still bitmap IO pending, e.g. a previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
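
/* Generate a new current UUID, optionally clearing the bitmap.  With
 * clear_bm set on a freshly created, connected device, this implements
 * the "--clear-bitmap" shortcut that skips the initial full sync: both
 * nodes simply agree that they are in sync now. */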
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
				     "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_ret_code
drbd_check_conn_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("connection name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid connection name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_check_conn_name(adm_ctx.conn_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("connection exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.conn_name))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* FIXME drop minor_count parameter, limit to MINORMASK */
	if (dh->minor >= minor_count) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	down_write(&drbd_cfg_rwsem);
	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
	up_write(&drbd_cfg_rwsem);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		drbd_delete_device(mdev);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	down_write(&drbd_cfg_rwsem);
	retcode = adm_delete_minor(adm_ctx.mdev);
	up_write(&drbd_cfg_rwsem);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	enum drbd_state_rv rv;
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_CONN_NOT_KNOWN;
		goto out;
	}

	down_read(&drbd_cfg_rwsem);
	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out_unlock;
		}
	}

	/* disconnect */
	rv = conn_try_disconnect(adm_ctx.tconn, 0);
	if (rv < SS_SUCCESS) {
		retcode = rv; /* enum type mismatch! */
		drbd_msg_put_info("failed to disconnect");
		goto out_unlock;
	}

	/* Make sure the network threads have actually stopped,
	 * state handling only does drbd_thread_stop_nowait(). */
	drbd_thread_stop(&adm_ctx.tconn->receiver);

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		rv = adm_detach(mdev);
		if (rv < SS_SUCCESS) {
			retcode = rv; /* enum type mismatch! */
			drbd_msg_put_info("failed to detach");
			goto out_unlock;
		}
	}
	up_read(&drbd_cfg_rwsem);

	/* delete volumes */
	down_write(&drbd_cfg_rwsem);
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			up_write(&drbd_cfg_rwsem);
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		drbd_thread_stop(&adm_ctx.tconn->worker);
		list_del(&adm_ctx.tconn->all_tconn);
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_CONN_IN_USE;
		drbd_msg_put_info("failed to delete connection");
	}
	up_write(&drbd_cfg_rwsem);
	goto out;

out_unlock:
	up_read(&drbd_cfg_rwsem);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	down_write(&drbd_cfg_rwsem);
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del(&adm_ctx.tconn->all_tconn);
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		retcode = NO_ERROR;
	} else {
		retcode = ERR_CONN_IN_USE;
	}
	up_write(&drbd_cfg_rwsem);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
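
/* Broadcast a state-info-broadcast (sib) event to the DRBD events
 * multicast group.  Losing an event because no one is currently
 * subscribed (-ESRCH from netlink) is expected and not treated as an
 * error. */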
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyway. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = 0;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
		"Event seq:%u sib_reason:%u\n",
		err, seq, sib->sib_reason);
}