
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>
#include <net/genetlink.h>
/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>
/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *conn_name;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail is lack of space in the skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_CONN	2
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	&& security_netlink_recv(skb, CAP_SYS_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb)
		goto fail;

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* a put of a few bytes into a fresh skb of >= 4k will always succeed,
	 * but check anyway */
	if (!adm_ctx.reply_dh)
		goto fail;

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
		nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
		if (nla)
			adm_ctx.conn_name = nla_data(nla);
	} else
		adm_ctx.volume = VOLUME_UNSPECIFIED;

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_by_name(adm_ctx.conn_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
		drbd_msg_put_info("unknown connection");
		return ERR_INVALID_REQUEST;
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
				adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different connection");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx.minor, adm_ctx.volume,
				adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev && !adm_ctx.tconn)
		adm_ctx.tconn = adm_ctx.mdev->tconn;

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return -ENOMEM;
}
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	struct nlattr *nla;
	const char *conn_name = NULL;

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;

	nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
	if (nla) {
		nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
		if (nla)
			conn_name = nla_data(nla);
	}

	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;

	if (get_net_conf(tconn)) {
		switch (((struct sockaddr *)tconn->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)tconn->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)tconn->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)tconn->net_conf->peer_addr)->sin_addr);
		}
		snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
		put_net_conf(tconn);
	}
}
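
/* Illustrative note (added, not in the original source): with the usual
 * usermode_helper of "/sbin/drbdadm" and minor 0, the invocation in
 * drbd_khelper() below amounts roughly to running
 *
 *	/sbin/drbdadm <cmd> minor-0
 *
 * with DRBD_PEER_AF and DRBD_PEER_ADDRESS in the environment as set up
 * by setup_khelper_env() above, e.g. DRBD_PEER_AF=ipv4 and
 * DRBD_PEER_ADDRESS=192.168.0.2.  The helper path is configurable, so
 * treat the values here as an example only. */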
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(mdev->tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		drbd_md_sync(mdev);
}

int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp, mdev->ldev->dc.fencing);
			put_ldev(mdev);
		}
	}

	return fp;
}
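
/* Summary (added note) of the fence-peer helper exit codes handled in
 * conn_try_outdate_peer() below; the shell exit status is (r >> 8) & 0xff:
 *
 *	3 - peer is inconsistent (or worse)
 *	4 - peer got outdated, or was already outdated
 *	5 - peer was down; outdate it only if our own disk is UpToDate
 *	6 - peer is primary; voluntarily outdate myself instead
 *	7 - peer was stonithed
 *
 * Any other value means the helper is broken, and IO may stay frozen. */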
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:
	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS)
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa))
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
}
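
/* Descriptive note (added, not in the original source): drbd_set_role()
 * below retries the role-change state transition up to max_tries times,
 * adjusting the request on each pass:
 *  - drop the pdsk demand again if the peer answered after all
 *    (SS_CW_FAILED_BY_PEER);
 *  - with "force", promote a disk between Inconsistent and UpToDate to
 *    UpToDate (SS_NO_UP_TO_DATE_DISK);
 *  - with a Consistent disk, try to fence the peer first;
 *  - on SS_TWO_PRIMARIES, wait roughly a ping timeout and retry once.
 * All of this runs under mdev->state_mutex. */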
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			schedule_timeout_interruptible((mdev->tconn->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev->tconn)) {
			mdev->tconn->net_conf->want_lose = 0;
			put_net_conf(mdev->tconn);
		}
		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this many bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}
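
/* Illustrative sketch (added note, derived from the offsets computed above
 * and deliberately approximate; offsets are in 512-byte sectors, relative
 * to md_offset): for internal meta data, the super block sits near the end
 * of the device, preceded by the activity log and the bitmap:
 *
 *	... user data ... | bitmap | activity log | super block |
 *	                  ^                       ^
 *	                  md_offset + bm_offset   md_offset
 *	                           ^ md_offset + al_offset (= -MD_AL_SECTORS)
 *
 * For external meta data (indexed or flex-external), the same components
 * live at positive offsets from the start of the meta device instead. */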
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
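
/* Usage example (added note, not in the original source):
 *
 *	char buf[10];
 *	ppsize(buf, 4);		=> "4 KB"
 *	ppsize(buf, 1048576);	=> "1024 MB"   (1 GiB, given in KB)
 *
 * The value is shifted by 10 bits (with rounding) until it drops below
 * 10000 or the unit reaches 'E', so a 10-byte buffer is always enough. */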
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */

/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns an enum determine_dev_size: dev_size_error on failure,
 * otherwise unchanged, shrunk or grew.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}
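
/* Descriptive note (added): drbd_new_dev_size() below picks the agreed
 * device size from its candidates in this order:
 *  - if both the peer's size (p_size) and my size (m_size) are known,
 *    take the minimum of the two;
 *  - otherwise fall back to the last agreed size (la_size), clipped to
 *    whichever of p_size/m_size is known;
 *  - a user-requested size (u_size) is applied last and may only shrink
 *    the result, never grow it beyond what is available. */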
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if the current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (!expect(dc->al_extents >= DRBD_AL_EXTENTS_MIN))
		dc->al_extents = DRBD_AL_EXTENTS_MIN;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		max_segments = mdev->ldev->dc.max_bio_bvecs;
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}
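
/* Summary (added note) of how drbd_reconsider_max_bio_size() below picks
 * the peer limit, keyed on the agreed protocol version while connected:
 *
 *	< 94	the peer's last reported value (peer_max_bio_size)
 *	== 94	DRBD_MAX_SIZE_H80_PACKET
 *	> 94	DRBD_MAX_BIO_SIZE (drbd 8.3.8 onwards)
 *
 * The effective limit handed to drbd_setup_queue_param() is then
 * min(local, peer). */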
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   From 8.3.8 onwards the peer can use multiple BIOs
	   for a single peer_request. */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = mdev->peer_max_bio_size;
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * Wait for a potentially exiting worker, then restart it,
 * or start a new one.  Flush any pending work, there may still be an
 * after_state_change queued.
 */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	wait_event(tconn->ping_wait, !test_and_set_bit(CONFIG_PENDING, &tconn->flags));
	wait_event(tconn->ping_wait, !test_bit(OBJECT_DYING, &tconn->flags));
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	spin_lock_irq(&tconn->req_lock);
	if (conn_all_vols_unconf(tconn)) {
		set_bit(OBJECT_DYING, &tconn->flags);
		drbd_thread_stop_nowait(&tconn->worker);
	} else
		clear_bit(CONFIG_PENDING, &tconn->flags);
	spin_unlock_irq(&tconn->req_lock);
	wake_up(&tconn->ping_wait);
}

/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *ndc; /* new disk conf */
	int err, fifo_size;
	int *rs_plan_s = NULL;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

/* FIXME freeze IO, cluster wide.
 *
 * We should make sure no-one uses
 * some half-updated struct when we
 * assign it later. */

	ndc = kmalloc(sizeof(*ndc), GFP_KERNEL);
	if (!ndc) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	memcpy(ndc, &mdev->ldev->dc, sizeof(*ndc));

	err = disk_conf_from_attrs_for_change(ndc, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail; /* otherwise a failed parse would still apply ndc below */
	}

	if (!expect(ndc->resync_rate >= 1))
		ndc->resync_rate = 1;

	/* clip to allowed range */
	if (!expect(ndc->al_extents >= DRBD_AL_EXTENTS_MIN))
		ndc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (!expect(ndc->al_extents <= DRBD_AL_EXTENTS_MAX))
		ndc->al_extents = DRBD_AL_EXTENTS_MAX;

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, ndc->resync_after);
	if (retcode != NO_ERROR)
		goto fail;

	fifo_size = (ndc->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kzalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size = fifo_size;
		mdev->rs_planed = 0;
		rs_plan_s = NULL;
	}

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, ndc);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* FIXME
	 * To avoid someone looking at a half-updated struct, we probably
	 * should have a rw-semaphore on net_conf and disk_conf.
	 */
	mdev->ldev->dc = *ndc;

	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

 fail:
	put_ldev(mdev);
	kfree(ndc);
	kfree(rs_plan_s);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	int cp_discovered = 0;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mdev = adm_ctx.mdev;
	conn_reconfig_start(mdev->tconn);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc = (struct disk_conf) {
		{}, 0, /* backing_dev */
		{}, 0, /* meta_dev */
		0, /* meta_dev_idx */
		DRBD_DISK_SIZE_SECT_DEF, /* disk_size */
		DRBD_MAX_BIO_BVECS_DEF, /* max_bio_bvecs */
		DRBD_ON_IO_ERROR_DEF, /* on_io_error */
		DRBD_FENCING_DEF, /* fencing */
		DRBD_RATE_DEF, /* resync_rate */
		DRBD_AFTER_DEF, /* resync_after */
		DRBD_AL_EXTENTS_DEF, /* al_extents */
		DRBD_C_PLAN_AHEAD_DEF, /* c_plan_ahead */
		DRBD_C_DELAY_TARGET_DEF, /* c_delay_target */
		DRBD_C_FILL_TARGET_DEF, /* c_fill_target */
		DRBD_C_MAX_RATE_DEF, /* c_max_rate */
		DRBD_C_MIN_RATE_DEF, /* c_min_rate */
		0, /* no_disk_barrier */
		0, /* no_disk_flush */
		0, /* no_disk_drain */
		0, /* no_md_flush */
	};

	err = disk_conf_from_attrs(&nbc->dc, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	if (get_net_conf(mdev->tconn)) {
		int prot = mdev->tconn->net_conf->wire_protocol;
		put_net_conf(mdev->tconn);
		if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  ((int)nbc->dc.meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;
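
	/* Added note: internal meta data means md_bdev and backing_bdev are
	 * the very same device; the check below enforces that equivalence
	 * in both directions (internal index <=> same device). */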
	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			       1, 61, sizeof(struct bm_extent),
			       offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	if ((int)nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			 "to currently maximum possible %llu sectors <==\n",
			 (unsigned long long) max_possible_sectors);
		if ((int)nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				 "meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev, &nbc->dc)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_FUA, &mdev->flags);
	else
		clear_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere.  */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_reconsider_max_bio_size(mdev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	     !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
  1260. set_bit(USE_DEGR_WFC_T, &mdev->flags);
  1261. dd = drbd_determine_dev_size(mdev, 0);
  1262. if (dd == dev_size_error) {
  1263. retcode = ERR_NOMEM_BITMAP;
  1264. goto force_diskless_dec;
  1265. } else if (dd == grew)
  1266. set_bit(RESYNC_AFTER_NEG, &mdev->flags);
  1267. if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
  1268. dev_info(DEV, "Assuming that all blocks are out of sync "
  1269. "(aka FullSync)\n");
  1270. if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
  1271. "set_n_write from attaching", BM_LOCKED_MASK)) {
  1272. retcode = ERR_IO_MD_DISK;
  1273. goto force_diskless_dec;
  1274. }
  1275. } else {
  1276. if (drbd_bitmap_io(mdev, &drbd_bm_read,
  1277. "read from attaching", BM_LOCKED_MASK)) {
  1278. retcode = ERR_IO_MD_DISK;
  1279. goto force_diskless_dec;
  1280. }
  1281. }
  1282. if (cp_discovered) {
  1283. drbd_al_apply_to_bm(mdev);
  1284. if (drbd_bitmap_io(mdev, &drbd_bm_write,
  1285. "crashed primary apply AL", BM_LOCKED_MASK)) {
  1286. retcode = ERR_IO_MD_DISK;
  1287. goto force_diskless_dec;
  1288. }
  1289. }
  1290. if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
  1291. drbd_suspend_al(mdev); /* IO is still suspended here... */
  1292. spin_lock_irq(&mdev->tconn->req_lock);
  1293. os = drbd_read_state(mdev);
  1294. ns = os;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WAS_UP_TO_DATE...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	conn_reconfig_done(mdev->tconn);
	drbd_adm_finish(info, retcode);
	return 0;

force_diskless_dec:
	put_ldev(mdev);
force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
	drbd_md_sync(mdev);
fail:
	conn_reconfig_done(mdev->tconn);
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

finish:
	drbd_adm_finish(info, retcode);
	return 0;
}
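
/*
 * A minimal, self-contained sketch (illustrative only, kept out of the
 * build with "#if 0") of the meta-data-flag to disk-state decision made
 * near the end of drbd_adm_attach() above.  The enum and flag values
 * below are simplified stand-ins, not the real drbd definitions.
 */
#if 0
#include <stdio.h>

enum sketch_disk { SK_INCONSISTENT, SK_OUTDATED, SK_CONSISTENT, SK_UP_TO_DATE };

#define SK_MDF_CONSISTENT	(1 << 0)
#define SK_MDF_WAS_UP_TO_DATE	(1 << 1)
#define SK_MDF_PEER_OUT_DATED	(1 << 2)

static enum sketch_disk disk_state_from_md_flags(unsigned flags,
						 int fencing_dont_care)
{
	enum sketch_disk disk;
	int peer_outdated = !!(flags & SK_MDF_PEER_OUT_DATED);

	if (flags & SK_MDF_CONSISTENT)
		disk = (flags & SK_MDF_WAS_UP_TO_DATE) ? SK_CONSISTENT : SK_OUTDATED;
	else
		disk = SK_INCONSISTENT;

	/* consistent data may be promoted if the peer is known to be
	 * outdated, or if fencing is not our concern */
	if (disk == SK_CONSISTENT && (peer_outdated || fencing_dont_care))
		disk = SK_UP_TO_DATE;
	return disk;
}

int main(void)
{
	printf("%d\n", disk_state_from_md_flags(SK_MDF_CONSISTENT | SK_MDF_WAS_UP_TO_DATE, 0));
	return 0;
}
#endif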

static int adm_detach(struct drbd_conf *mdev)
{
	enum drbd_state_rv retcode;

	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	wait_event(mdev->misc_wait,
		   mdev->state.disk != D_DISKLESS ||
		   !atomic_read(&mdev->local_cnt));
	drbd_resume_io(mdev);
	return retcode;
}

/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_detach(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static bool conn_resync_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_SYNC_SOURCE ||
		    mdev->state.conn == C_SYNC_TARGET ||
		    mdev->state.conn == C_PAUSED_SYNC_S ||
		    mdev->state.conn == C_PAUSED_SYNC_T)
			return true;
	}
	return false;
}

static bool conn_ov_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_VERIFY_S ||
		    mdev->state.conn == C_VERIFY_T)
			return true;
	}
	return false;
}

int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct net_conf *new_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;

	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	/* we also need a net config
	 * to change the options on */
	if (!get_net_conf(tconn)) {
		drbd_msg_put_info("net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	conn_reconfig_start(tconn);

	memcpy(new_conf, tconn->net_conf, sizeof(*new_conf));
	err = net_conf_from_attrs_for_change(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	/* re-sync running */
	rsr = conn_resync_running(tconn);
	if (rsr && strcmp(new_conf->csums_alg, tconn->net_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && new_conf->csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(new_conf->csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = conn_ov_running(tconn);
	if (ovr) {
		if (strcmp(new_conf->verify_alg, tconn->net_conf->verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && new_conf->verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(new_conf->verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}
	/* For now, use struct assignment, not pointer assignment.
	 * We don't have any means to determine who might still
	 * keep a local alias into the struct,
	 * so we cannot just free it and hope for the best :(
	 * FIXME
	 * To avoid someone looking at a half-updated struct, we probably
	 * should have a rw-semaphore on net_conf and disk_conf.
	 */
	*tconn->net_conf = *new_conf;

	if (!rsr) {
		crypto_free_hash(tconn->csums_tfm);
		tconn->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_hash(tconn->verify_tfm);
		tconn->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}

	if (tconn->cstate >= C_WF_REPORT_PARAMS)
		drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

fail:
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	kfree(new_conf);
	put_net_conf(tconn);
	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
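
/*
 * A minimal, self-contained sketch (illustrative only, compiled out with
 * "#if 0") of the allocate-validate-swap pattern drbd_adm_net_opts() uses
 * for the csums and verify transforms above: build the replacement first,
 * install it only after all checks have passed, and free whichever copy is
 * left over on every exit path.  The "resource" type and helpers are toy
 * stand-ins, not kernel or drbd API.
 */
#if 0
#include <stdlib.h>
#include <string.h>

struct resource { char *alg; };

static struct resource *resource_alloc(const char *alg)
{
	struct resource *r = malloc(sizeof(*r));

	if (r)
		r->alg = strdup(alg);
	return r;
}

static void resource_free(struct resource *r)
{
	if (r) {
		free(r->alg);
		free(r);
	}
}

/* Returns 0 on success; on failure *slot is left untouched. */
static int swap_in_new(struct resource **slot, const char *new_alg)
{
	struct resource *new_r = resource_alloc(new_alg);

	if (!new_r)
		return -1;	/* the old resource stays valid */
	resource_free(*slot);	/* success: retire the old copy */
	*slot = new_r;
	return 0;
}

int main(void)
{
	struct resource *slot = resource_alloc("crc32c");

	swap_in_new(&slot, "sha1");
	resource_free(slot);
	return 0;
}
#endif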

int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	struct drbd_conf *mdev;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct drbd_tconn *oconn;
	struct drbd_tconn *tconn;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	*new_conf = (struct net_conf) {
		{}, 0, /* my_addr */
		{}, 0, /* peer_addr */
		{}, 0, /* shared_secret */
		{}, 0, /* cram_hmac_alg */
		{}, 0, /* integrity_alg */
		{}, 0, /* verify_alg */
		{}, 0, /* csums_alg */
		DRBD_PROTOCOL_DEF, /* wire_protocol */
		DRBD_CONNECT_INT_DEF, /* try_connect_int */
		DRBD_TIMEOUT_DEF, /* timeout */
		DRBD_PING_INT_DEF, /* ping_int */
		DRBD_PING_TIMEO_DEF, /* ping_timeo */
		DRBD_SNDBUF_SIZE_DEF, /* sndbuf_size */
		DRBD_RCVBUF_SIZE_DEF, /* rcvbuf_size */
		DRBD_KO_COUNT_DEF, /* ko_count */
		DRBD_MAX_BUFFERS_DEF, /* max_buffers */
		DRBD_MAX_EPOCH_SIZE_DEF, /* max_epoch_size */
		DRBD_UNPLUG_WATERMARK_DEF, /* unplug_watermark */
		DRBD_AFTER_SB_0P_DEF, /* after_sb_0p */
		DRBD_AFTER_SB_1P_DEF, /* after_sb_1p */
		DRBD_AFTER_SB_2P_DEF, /* after_sb_2p */
		DRBD_RR_CONFLICT_DEF, /* rr_conflict */
		DRBD_ON_CONGESTION_DEF, /* on_congestion */
		DRBD_CONG_FILL_DEF, /* cong_fill */
		DRBD_CONG_EXTENTS_DEF, /* cong_extents */
		0, /* two_primaries */
		0, /* want_lose */
		0, /* no_cork */
		0, /* always_asbp */
		0, /* dry_run */
		0, /* use_rle */
	};

	err = net_conf_from_attrs(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (get_ldev(mdev)) {
			enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
			put_ldev(mdev);
			if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
				retcode = ERR_STONITH_AND_PROT_A;
				goto fail;
			}
		}
		if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
			retcode = ERR_DISCARD;
			goto fail;
		}
		if (!mdev->bitmap) {
			if (drbd_bm_init(mdev)) {
				retcode = ERR_NOMEM;
				goto fail;
			}
		}
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
		retcode = ERR_CONG_NOT_PROTO_A;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;

	/* No need to take drbd_cfg_mutex here.  All reconfiguration is
	 * strictly serialized on genl_lock().  We are protected against
	 * concurrent reconfiguration/addition/deletion */
	list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
		if (oconn == tconn)
			continue;
		if (get_net_conf(oconn)) {
			taken_addr = (struct sockaddr *)&oconn->net_conf->my_addr;
			if (new_conf->my_addr_len == oconn->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&oconn->net_conf->peer_addr;
			if (new_conf->peer_addr_len == oconn->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(oconn);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	/* allocation not in the IO path, cqueue thread context */
	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	conn_flush_workqueue(tconn);
	spin_lock_irq(&tconn->req_lock);
	if (tconn->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&tconn->req_lock);
		goto fail;
	}
	tconn->net_conf = new_conf;

	crypto_free_hash(tconn->cram_hmac_tfm);
	tconn->cram_hmac_tfm = tfm;

	crypto_free_hash(tconn->integrity_w_tfm);
	tconn->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(tconn->integrity_r_tfm);
	tconn->integrity_r_tfm = integrity_r_tfm;

	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);
	tconn->int_dig_in = int_dig_in;
	tconn->int_dig_vv = int_dig_vv;
	retcode = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
		kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	}
	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return 0;

fail:
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_conf);
	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	if (force) {
		spin_lock_irq(&tconn->req_lock);
		if (tconn->cstate >= C_WF_CONNECTION)
			_conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		spin_unlock_irq(&tconn->req_lock);
		return SS_SUCCESS;
	}

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						   pdsk, D_OUTDATED), CS_VERBOSE);
		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						   disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
			rv = SS_SUCCESS;
		}
		break;
	default:
		/* no special handling necessary */
		break;
	}

	return rv;
}
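
/*
 * A self-contained sketch (illustrative only, compiled out with "#if 0")
 * of the fallback ladder in conn_try_disconnect() above: try the graceful
 * transition, map the two "soft" refusals to amended retries, and only
 * force the transition as a last resort.  The enum values and the
 * request() stub are simplified stand-ins, not drbd's state engine.
 */
#if 0
enum rv { RV_SUCCESS = 1, RV_NOTHING_TO_DO, RV_PRIMARY_NOP,
	  RV_FAILED_BY_PEER, RV_REFUSED };

/* stand-in for conn_request_state(); always succeeds in this sketch */
static enum rv request(int hard, int outdate_self, int outdate_peer)
{
	(void)hard; (void)outdate_self; (void)outdate_peer;
	return RV_SUCCESS;
}

static enum rv try_disconnect(enum rv first_answer)
{
	switch (first_answer) {
	case RV_NOTHING_TO_DO:
		return RV_SUCCESS;	/* already disconnected */
	case RV_PRIMARY_NOP:
		/* retry, additionally declaring the peer outdated */
		return request(0, 0, 1);
	case RV_FAILED_BY_PEER:
		/* retry, outdating our own disk; force if even that fails */
		if (request(0, 1, 0) != RV_SUCCESS)
			return request(1, 0, 0);
		return RV_SUCCESS;
	default:
		return first_answer;
	}
}

int main(void)
{
	return try_disconnect(RV_PRIMARY_NOP) == RV_SUCCESS ? 0 : 1;
}
#endif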

int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(tconn->ping_wait,
				     tconn->cstate != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the meantime! */
		retcode = ERR_INTR;
		goto fail;
	}

	retcode = NO_ERROR;
fail:
	drbd_adm_finish(info, retcode);
	return 0;
}

void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
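
/*
 * A tiny self-contained sketch (illustrative only, compiled out with
 * "#if 0") of the sync-source election in resync_after_online_grow()
 * above: if the roles differ, the primary becomes sync source; on a tie,
 * the DISCARD_CONCURRENT bit negotiated during the handshake breaks it.
 * The plain-int parameters stand in for drbd state fields.
 */
#if 0
static int i_am_sync_source(int i_am_primary, int peer_is_primary,
			    int discard_concurrent)
{
	if (i_am_primary != peer_is_primary)
		return i_am_primary;
	return discard_concurrent;	/* tie-breaker from the handshake */
}

int main(void)
{
	return i_am_sync_source(1, 0, 0) ? 0 : 1;
}
#endif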

int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

fail:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	cpumask_var_t new_cpu_mask;
	struct drbd_tconn *tconn;
	int *rs_plan_s = NULL;
	struct res_opts sc;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;
	tconn = adm_ctx.tconn;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		drbd_msg_put_info("unable to allocate cpumask");
		goto fail;
	}

	if (((struct drbd_genlmsghdr*)info->userhdr)->flags
	    & DRBD_GENL_F_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct res_opts));
		sc.on_no_data = DRBD_ON_NO_DATA_DEF;
	} else
		sc = tconn->res_opts;

	err = res_opts_from_attrs(&sc, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				     cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	tconn->res_opts = sc;

	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(tconn);
		tconn->receiver.reset_cpu_mask = 1;
		tconn->asender.reset_cpu_mask = 1;
		tconn->worker.reset_cpu_mask = 1;
	}

fail:
	kfree(rs_plan_s);
	free_cpumask_var(new_cpu_mask);
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->tconn->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
}

int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}

int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED)
		NLA_PUT_U32(skb, T_ctx_volume, vnr);
	NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}
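
/*
 * A self-contained sketch (illustrative only, compiled out with "#if 0")
 * of the nesting discipline nla_put_drbd_cfg_context() follows above:
 * once a nest has been started, every put failure must cancel the whole
 * nest so the message stays well-formed.  The msg/attr types and helpers
 * are toy stand-ins, not the kernel netlink API.
 */
#if 0
struct msg  { int failed; };
struct attr { int start;  };

static struct attr the_attr;

static struct attr *nest_start(struct msg *m, int type)
{
	(void)type;
	return m->failed ? 0 : &the_attr;
}

static int put_u32(struct msg *m, int type, unsigned v)
{
	(void)type; (void)v;
	return m->failed ? -1 : 0;	/* pretend the buffer ran out */
}

static void nest_end(struct msg *m, struct attr *a)    { (void)m; (void)a; }
static void nest_cancel(struct msg *m, struct attr *a) { (void)m; (void)a; }

static int put_context(struct msg *m, unsigned volume)
{
	struct attr *nla = nest_start(m, 1);

	if (!nla)
		return -1;
	if (put_u32(m, 2, volume)) {
		nest_cancel(m, nla);	/* roll the partial nest back */
		return -1;
	}
	nest_end(m, nla);
	return 0;
}

int main(void)
{
	struct msg m = { 0 };

	return put_context(&m, 0);
}
#endif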

int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct nlattr *nla;
	int got_ldev;
	int got_net;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);
	got_net = get_net_conf(mdev->tconn);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	if (got_ldev)
		if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
			goto nla_put_failure;
	if (got_net)
		if (net_conf_to_skb(skb, mdev->tconn->net_conf, exclude_sensitive))
			goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
	NLA_PUT_U32(skb, T_current_state, mdev->state.i);
	NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
	NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));

	if (got_ldev) {
		NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
		NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
		NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
			NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			NLA_PUT_U32(skb, T_prev_state, sib->os.i);
			NLA_PUT_U32(skb, T_new_state, sib->ns.i);
			break;
		case SIB_HELPER_POST:
			NLA_PUT_U32(skb,
				T_helper_exit_code, sib->helper_exit_code);
			/* fall through */
		case SIB_HELPER_PRE:
			NLA_PUT_STRING(skb, T_helper, sib->helper_name);
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	if (got_net)
		put_net_conf(mdev->tconn);
	return err;
}
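
/*
 * A self-contained sketch (illustrative only, compiled out with "#if 0")
 * of the "if (0) label:" idiom used at the end of nla_put_status_info()
 * above: the error assignment sits on the normal path but is only
 * reachable via goto, so success and failure share the same
 * reference-dropping tail.
 */
#if 0
#include <stdio.h>

static int do_work(int fail)
{
	int err = 0;

	if (fail)
		goto some_failure;

	/* ... the normal path would do its work here ... */

	if (0)
some_failure:
		err = -1;	/* executed only when jumped to */

	/* shared cleanup, on both the success and the error path */
	printf("cleanup runs, err = %d\n", err);
	return err;
}

int main(void)
{
	do_work(0);
	do_work(1);
	return 0;
}
#endif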

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with drbd_new_tconn/drbd_free_tconn */
	mutex_lock(&drbd_cfg_mutex);
	/* synchronize with drbd_delete_device */
	rcu_read_lock();
next_tconn:
	/* revalidate iterator position */
	list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry(tconn->all_tconn.next,
					 struct drbd_tconn, all_tconn);
			/* But, did we dump any volume on this tconn yet? */
			if (volume != 0) {
				tconn = NULL;
				volume = 0;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* this is a tconn without a single volume */
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
				genlmsg_cancel(skb, dh);
			else
				genlmsg_end(skb, dh);
			goto out;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
			genlmsg_cancel(skb, dh);
			goto out;
		}
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	mutex_unlock(&drbd_cfg_mutex);
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}
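
/*
 * A self-contained sketch (illustrative only, compiled out with "#if 0")
 * of the resumable two-level iteration in drbd_adm_get_status_all()
 * above: the dump callback keeps a (connection, volume) cursor across
 * calls and revalidates it against the live list each time, so it
 * tolerates entries coming and going between calls.  Plain arrays and
 * counters stand in for the connection list and the volume idr.
 */
#if 0
#include <stdio.h>

#define N_CONN 3
#define N_VOL  2

/* emit at most one item per call; return 0 when the dump is done */
static int dump_one(unsigned *conn_pos, unsigned *vol_pos)
{
	while (*conn_pos < N_CONN) {
		if (*vol_pos < N_VOL) {
			printf("conn %u vol %u\n", *conn_pos, *vol_pos);
			(*vol_pos)++;
			return 1;
		}
		(*conn_pos)++;	/* advance the outer cursor ... */
		*vol_pos = 0;	/* ... and restart the inner one */
	}
	return 0;
}

int main(void)
{
	unsigned conn = 0, vol = 0;

	while (dump_one(&conn, &vol))
		;	/* each call resumes exactly at the saved cursor */
	return 0;
}
#endif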

int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		/* resume from last known position, if possible */
		struct start_ov_parms parms =
			{ .ov_start_sector = mdev->ov_start_sector };
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
		/* w_make_ov_request expects position to be aligned */
		mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
	}

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_ret_code
drbd_check_conn_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("connection name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid connection name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_check_conn_name(adm_ctx.conn_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("connection exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!drbd_new_tconn(adm_ctx.conn_name))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* FIXME drop minor_count parameter, limit to MINORMASK */
	if (dh->minor >= minor_count) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		drbd_delete_device(mdev);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&drbd_cfg_mutex);
	retcode = adm_delete_minor(adm_ctx.mdev);
	mutex_unlock(&drbd_cfg_mutex);
	/* if this was the last volume of this connection,
	 * this will terminate all threads */
	if (retcode == NO_ERROR)
		conn_reconfig_done(adm_ctx.tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	enum drbd_state_rv rv;
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_CONN_NOT_KNOWN;
		goto out;
	}

	mutex_lock(&drbd_cfg_mutex);
	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out_unlock;
		}
	}

	/* disconnect */
	rv = conn_try_disconnect(adm_ctx.tconn, 0);
	if (rv < SS_SUCCESS) {
		retcode = rv; /* enum type mismatch! */
		drbd_msg_put_info("failed to disconnect");
		goto out_unlock;
	}

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		rv = adm_detach(mdev);
		if (rv < SS_SUCCESS) {
			retcode = rv; /* enum type mismatch! */
			drbd_msg_put_info("failed to detach");
			goto out_unlock;
		}
	}

	/* delete volumes */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			goto out_unlock;
		}
	}

	/* stop all threads */
	conn_reconfig_done(adm_ctx.tconn);

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		drbd_free_tconn(adm_ctx.tconn);
		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_CONN_IN_USE;
		drbd_msg_put_info("failed to delete connection");
		goto out_unlock;
	}
out_unlock:
	mutex_unlock(&drbd_cfg_mutex);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&drbd_cfg_mutex);
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		drbd_free_tconn(adm_ctx.tconn);
		retcode = NO_ERROR;
	} else {
		retcode = ERR_CONN_IN_USE;
	}
	mutex_unlock(&drbd_cfg_mutex);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyway. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = 0;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
		"Event seq:%u sib_reason:%u\n",
		err, seq, sib->sib_reason);
}