/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail would be no space in the skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
			    unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	    && security_netlink_recv(skb, CAP_SYS_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					     info, &drbd_genl_family, 0, cmd);
	/* a put of a few bytes into a fresh skb of >= 4k will always succeed,
	 * but check anyway */
	if (!adm_ctx.reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	adm_ctx.volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				    info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				    info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx.volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx.resource_name = nla_data(nla);
		adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx.my_addr &&
		     nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
		    (adm_ctx.peer_addr &&
		     nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info("unknown resource");
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
			drbd_msg_put_info("no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.mdev) {
			drbd_msg_put_info("no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.my_addr && adm_ctx.peer_addr)
			adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
							  nla_len(adm_ctx.my_addr),
							  nla_data(adm_ctx.peer_addr),
							  nla_len(adm_ctx.peer_addr));
		if (!adm_ctx.tconn) {
			drbd_msg_put_info("unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
			   adm_ctx.minor, adm_ctx.resource_name,
			   adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			   adm_ctx.minor, adm_ctx.volume,
			   adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return err;
}

static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}
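
/*
 * Illustrative sketch (not compiled): the calling convention every .doit
 * handler in this file follows around the global adm_ctx.
 * drbd_adm_prepare() fills adm_ctx and allocates the reply skb,
 * drbd_msg_put_info() may attach a human readable note, and
 * drbd_adm_finish() sends the reply and drops the tconn reference.
 * The handler name and body below are hypothetical.
 */
#if 0
int drbd_adm_example(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;	/* prepare failed before a reply skb existed */
	if (retcode != NO_ERROR)
		goto out;
	/* ... operate on adm_ctx.mdev / adm_ctx.tconn ... */
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
#endif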
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
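
/*
 * Illustrative sketch: the environment a helper sees after
 * setup_khelper_env() for an IPv4 peer. The address shown is a
 * documentation example (192.0.2.1), not something DRBD produces itself.
 *
 *	HOME=/
 *	TERM=linux
 *	PATH=/sbin:/usr/sbin:/bin:/usr/bin
 *	DRBD_PEER_AF=ipv4
 *	DRBD_PEER_ADDRESS=192.0.2.1
 */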
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			 "TERM=linux",
			 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			 NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(mdev->tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
			 usermode_helper, cmd, mb,
			 (ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
			 usermode_helper, cmd, mb,
			 (ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		rcu_read_unlock();
		drbd_md_sync(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			 "TERM=linux",
			 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			 NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp,
				   rcu_dereference(mdev->ldev->disk_conf)->fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}

bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

out:
	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
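
/*
 * Summary of the fence-peer helper exit codes handled above (the exit
 * status sits in bits 8..15 of the call_usermodehelper() return value):
 *
 *	3  peer is inconsistent        -> pdsk = Inconsistent
 *	4  peer was fenced/outdated    -> pdsk = Outdated
 *	5  peer was down               -> pdsk = Outdated, but only if our
 *	                                  disk is UpToDate; otherwise no-op
 *	6  peer is (still) primary     -> outdate *our* disk instead
 *	7  peer was stonithed          -> pdsk = Outdated (expects
 *	                                  fencing == Stonith)
 *
 * Any other exit code means the script is broken and may leave IO frozen.
 */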
static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}

enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i = 0; val.role = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						 CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			      mdev->state.pdsk <= D_FAILED)
			     && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_current_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	int meta_dev_idx;

	rcu_read_lock();
	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;

	switch (meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
	rcu_read_unlock();
}

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
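
/*
 * Illustrative sketch (not compiled): ppsize() takes KB and rounds at the
 * half-unit boundary (bit 9) while shifting. The function below is
 * hypothetical, only to show expected outputs.
 */
#if 0
static void ppsize_example(void)
{
	char buf[10];
	ppsize(buf, 4);		/* "4 KB" */
	ppsize(buf, 1048576);	/* "1024 MB": 1048576 >> 10, unit bumped once */
}
#endif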
/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */

/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
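
/*
 * Illustrative sketch (not compiled): the short-lived bracket these two
 * helpers are meant for, as used by drbd_determine_dev_size() below;
 * the surrounding function is hypothetical.
 */
#if 0
static void suspend_resume_example(struct drbd_conf *mdev)
{
	drbd_suspend_io(mdev);	/* waits until ap_bio_cnt drains to 0 */
	/* ... no application IO is in flight here ... */
	drbd_resume_io(mdev);	/* clears SUSPEND_IO, wakes misc_wait */
}
#endif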
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size, u_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
					"Leaving size unchanged at size = %lu KB\n",
					(unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
			 (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				     "size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
				(unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
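
/*
 * Worked example for drbd_new_dev_size(), values in sectors, purely
 * illustrative: with p_size = 2000, m_size = 1000 and u_size = 0, both
 * sizes are known, so size = min(2000, 1000) = 1000. A user request of
 * u_size = 500 is smaller and wins: size = 500. A u_size above 1000
 * would be refused with "Requested disk size is too big" and the
 * computed 1000 kept.
 */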
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
					e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}

void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   Because new from 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (mdev->tconn->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
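
/*
 * Peer limit chosen above, by agreed protocol version:
 *
 *	< 94	min(last known peer value, DRBD_MAX_SIZE_H80_PACKET)
 *	== 94	DRBD_MAX_SIZE_H80_PACKET (old drbd claimed too much)
 *	95..99	DRBD_MAX_BIO_SIZE_P95 (drbd 8.3.8 up to before 8.4.0)
 *	>= 100	DRBD_MAX_BIO_SIZE
 *
 * The queue is then set up for min(local, peer).
 */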
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;
	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn) &&
		       tconn->cstate == C_STANDALONE;
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}

/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}

static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static void enforce_disk_conf_limits(struct disk_conf *dc)
{
	if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
		dc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
		dc->al_extents = DRBD_AL_EXTENTS_MAX;
	if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
}

int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&mdev->tconn->conf_update);
	old_disk_conf = mdev->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	enforce_disk_conf_limits(new_disk_conf);

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, new_disk_conf);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	write_lock_irq(&global_state_lock);
	retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = mdev->rs_plan_s;
		rcu_assign_pointer(mdev->rs_plan_s, new_plan);
	}

	mutex_unlock(&mdev->tconn->conf_update);

	if (new_disk_conf->al_updates)
		mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
  1061. else
  1062. mdev->ldev->md.flags |= MDF_AL_DISABLED;
  1063. drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
  1064. drbd_md_sync(mdev);
  1065. if (mdev->state.conn >= C_CONNECTED)
  1066. drbd_send_sync_param(mdev);
  1067. synchronize_rcu();
  1068. kfree(old_disk_conf);
  1069. kfree(old_plan);
  1070. mod_timer(&mdev->request_timer, jiffies + HZ);
  1071. goto success;
  1072. fail_unlock:
  1073. mutex_unlock(&mdev->tconn->conf_update);
  1074. fail:
  1075. kfree(new_disk_conf);
  1076. kfree(new_plan);
  1077. success:
  1078. put_ldev(mdev);
  1079. out:
  1080. drbd_adm_finish(info, retcode);
  1081. return 0;
  1082. }
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct disk_conf *new_disk_conf = NULL;
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	struct fifo_buffer *new_plan = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	struct net_conf *nc;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mdev = adm_ctx.mdev;
	conn_reconfig_start(mdev->tconn);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error. Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	nbc->disk_conf = new_disk_conf;

	set_disk_conf_defaults(new_disk_conf);
	err = disk_conf_from_attrs(new_disk_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	enforce_disk_conf_limits(new_disk_conf);

	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
	if (!new_plan) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc) {
		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
			rcu_read_unlock();
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}
	rcu_read_unlock();

	bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  (new_disk_conf->meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}
	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			1, 61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) new_disk_conf->disk_size);
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TOO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			"at least %llu sectors needed for this meta-disk type\n",
			(unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (new_disk_conf->meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				"meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	/* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
	 * We need a way to either ignore barrier acks for barriers sent before a device
	 * was attached, or a way to wait for all pending barrier acks to come in.
	 * As barriers are counted per resource,
	 * we'd need to suspend io on all devices of a resource.
	 */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
			(unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev, new_disk_conf)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TOO_SMALL;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &mdev->flags);
	else
		set_bit(MD_NO_FUA, &mdev->flags);
	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	mdev->rs_plan_s = new_plan;
	nbc = NULL;
	resync_lru = NULL;
	new_disk_conf = NULL;
	new_plan = NULL;

	drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
		set_bit(CRASHED_PRIMARY, &mdev->flags);

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_reconsider_max_bio_size(mdev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	    drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determine_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
	    (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
	     drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
			"(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->tconn->req_lock);
	os = drbd_read_state(mdev);
	ns = os;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	rcu_read_lock();
	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */
	/* Clear the flag when al_updates are enabled; "&= MDF_AL_DISABLED"
	 * would instead wipe all other meta-data flags. */
	if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
		mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		mdev->ldev->md.flags |= MDF_AL_DISABLED;
	rcu_read_unlock();

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	mod_timer(&mdev->request_timer, jiffies + HZ);

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	conn_reconfig_done(mdev->tconn);
	drbd_adm_finish(info, retcode);
	return 0;

force_diskless_dec:
	put_ldev(mdev);
force_diskless:
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	drbd_md_sync(mdev);
fail:
	conn_reconfig_done(mdev->tconn);
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	kfree(new_disk_conf);
	lc_destroy(resync_lru);
	kfree(new_plan);

finish:
	drbd_adm_finish(info, retcode);
	return 0;
}
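
/* adm_detach() - request the transition to D_FAILED; a forced detach skips
 * waiting for meta-data IO, otherwise block until the disk state has left
 * D_FAILED (which then transitions to D_DISKLESS on its own). */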
static int adm_detach(struct drbd_conf *mdev, int force)
{
	enum drbd_state_rv retcode;
	int ret;

	if (force) {
		drbd_force_state(mdev, NS(disk, D_FAILED));
		retcode = SS_SUCCESS;
		goto out;
	}

	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
	drbd_md_put_buffer(mdev);
	/* D_FAILED will transition to DISKLESS. */
	ret = wait_event_interruptible(mdev->misc_wait,
			mdev->state.disk != D_FAILED);
	drbd_resume_io(mdev);
	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
out:
	return retcode;
}

/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct detach_parms parms = { };
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
		err = detach_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
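
/* conn_resync_running() - true if any volume of @tconn is currently sync
 * source or sync target, paused or not. */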
static bool conn_resync_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_SYNC_SOURCE ||
		    mdev->state.conn == C_SYNC_TARGET ||
		    mdev->state.conn == C_PAUSED_SYNC_S ||
		    mdev->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
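
/* conn_ov_running() - true if any volume of @tconn currently runs online
 * verify, as source or target. */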
static bool conn_ov_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_VERIFY_S ||
		    mdev->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
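
/* _check_net_options() - sanity checks of a new net_conf against the
 * current connection and disk states; changes to protocol, dual-primary
 * or integrity settings of an established connection need agreed
 * protocol version 100.  Called with rcu_read_lock held. */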
static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
{
	struct drbd_conf *mdev;
	int i;

	if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
		if (new_conf->wire_protocol != old_conf->wire_protocol)
			return ERR_NEED_APV_100;

		if (new_conf->two_primaries != old_conf->two_primaries)
			return ERR_NEED_APV_100;

		if (!new_conf->integrity_alg != !old_conf->integrity_alg)
			return ERR_NEED_APV_100;

		if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
			return ERR_NEED_APV_100;
	}

	if (!new_conf->two_primaries &&
	    conn_highest_role(tconn) == R_PRIMARY &&
	    conn_highest_peer(tconn) == R_PRIMARY)
		return ERR_NEED_ALLOW_TWO_PRI;

	if (new_conf->two_primaries &&
	    (new_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (get_ldev(mdev)) {
			enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
			put_ldev(mdev);
			if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
			return ERR_DISCARD;
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}

static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
	static enum drbd_ret_code rv;
	struct drbd_conf *mdev;
	int i;

	rcu_read_lock();
	rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
	rcu_read_unlock();

	/* tconn->volumes protected by genl_lock() here */
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (!mdev->bitmap) {
			if (drbd_bm_init(mdev))
				return ERR_NOMEM;
		}
	}

	return rv;
}
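
/* Transform handles for the configurable algorithms of a connection.
 * Filled in by alloc_crypto(); on success the handles are handed over
 * to the tconn, leftovers are released via free_crypto(). */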
struct crypto {
	struct crypto_hash *verify_tfm;
	struct crypto_hash *csums_tfm;
	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;
};
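
/* alloc_hash() - allocate a hash transform by name.  An empty name is
 * valid and leaves *tfm NULL; on allocation failure, return @err_alg. */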
static int
alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}

static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	enum drbd_ret_code rv;

	rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
			ERR_CSUMS_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
			ERR_VERIFY_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
			ERR_INTEGRITY_ALG);
	if (rv != NO_ERROR)
		return rv;
	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_conf->cram_hmac_alg);
		rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
				ERR_AUTH_ALG);
	}

	return rv;
}

static void free_crypto(struct crypto *crypto)
{
	crypto_free_hash(crypto->cram_hmac_tfm);
	crypto_free_hash(crypto->integrity_tfm);
	crypto_free_hash(crypto->csums_tfm);
	crypto_free_hash(crypto->verify_tfm);
}
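
/* drbd_adm_net_opts() - change the net_conf of an established connection,
 * replacing it under conf_update by RCU assignment; switching the csums
 * resp. verify algorithm is refused while a resync resp. online verify
 * is running. */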
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct net_conf *old_conf, *new_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto crypto = { };

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;

	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	conn_reconfig_start(tconn);

	mutex_lock(&tconn->data.mutex);
	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;

	if (!old_conf) {
		drbd_msg_put_info("net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto fail;
	}

	*new_conf = *old_conf;
	if (should_set_defaults(info))
		set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs_for_change(new_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* re-sync running */
	rsr = conn_resync_running(tconn);
	if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	/* online verify running */
	ovr = conn_ov_running(tconn);
	if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
		retcode = ERR_VERIFY_RUNNING;
		goto fail;
	}

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_assign_pointer(tconn->net_conf, new_conf);

	if (!rsr) {
		crypto_free_hash(tconn->csums_tfm);
		tconn->csums_tfm = crypto.csums_tfm;
		crypto.csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_hash(tconn->verify_tfm);
		tconn->verify_tfm = crypto.verify_tfm;
		crypto.verify_tfm = NULL;
	}

	crypto_free_hash(tconn->integrity_tfm);
	tconn->integrity_tfm = crypto.integrity_tfm;
	if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
		/* Do this without trying to take tconn->data.mutex again. */
		__drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);

	crypto_free_hash(tconn->cram_hmac_tfm);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;

	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	synchronize_rcu();
	kfree(old_conf);

	if (tconn->cstate >= C_WF_REPORT_PARAMS)
		drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

	goto done;

fail:
	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	free_crypto(&crypto);
	kfree(new_conf);
done:
	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
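
/* drbd_adm_connect() - configure the network of a standalone connection
 * and request the C_UNCONNECTED state, after verifying that neither the
 * local nor the peer address is in use by another connection. */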
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	struct net_conf *old_conf, *new_conf = NULL;
	struct crypto crypto = { };
	struct drbd_tconn *tconn;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;
	if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
		drbd_msg_put_info("connection endpoint(s) missing");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* No need for _rcu here. All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
		if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
		    !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
			retcode = ERR_LOCAL_ADDR;
			goto out;
		}

		if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
		    !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
			retcode = ERR_PEER_ADDR;
			goto out;
		}
	}

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, drbdsetup / netlink process context */
	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs(new_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	conn_flush_workqueue(tconn);

	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;
	if (old_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&tconn->conf_update);
		goto fail;
	}
	rcu_assign_pointer(tconn->net_conf, new_conf);

	conn_free_crypto(tconn);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
	tconn->integrity_tfm = crypto.integrity_tfm;
	tconn->csums_tfm = crypto.csums_tfm;
	tconn->verify_tfm = crypto.verify_tfm;

	tconn->my_addr_len = nla_len(adm_ctx.my_addr);
	memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
	tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
	memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);

	mutex_unlock(&tconn->conf_update);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
	}
	rcu_read_unlock();

	retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return 0;

fail:
	free_crypto(&crypto);
	kfree(new_conf);

	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
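
/* conn_try_disconnect() - request C_DISCONNECTING, retrying with an
 * outdated peer disk resp. local disk where the state engine demands it;
 * on success, also wait for the receiver thread to terminate and force
 * C_STANDALONE as a race breaker. */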
static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						pdsk, D_OUTDATED), CS_VERBOSE);
		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
					CS_HARD);
		}
		break;
	default:;
		/* no special handling necessary */
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&adm_ctx.tconn->receiver);

		/* Race breaker.  This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart.  We may have "killed" the receiver thread just
		 * after drbdd_init() returned.  Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
				CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			conn_err(tconn,
				"unexpected rv2=%d in conn_try_disconnect()\n",
				rv2);
	}
	return rv;
}

int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
fail:
	drbd_adm_finish(info, retcode);
	return 0;
}
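
/* resync_after_online_grow() - after an online grow, the primary (or,
 * with equal roles, the DISCARD_CONCURRENT winner) becomes sync source
 * for the newly added storage area. */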
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
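
/* drbd_adm_resize() - change the user-configured disk size and
 * re-determine the resulting device size; while connected, announce the
 * new sizes and UUIDs to the peer. */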
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	sector_t u_size;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	if (u_size != (sector_t)rs.resize_size) {
		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			retcode = ERR_NOMEM;
			goto fail_ldev;
		}
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	if (new_disk_conf) {
		mutex_lock(&mdev->tconn->conf_update);
		old_disk_conf = mdev->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;
		new_disk_conf->disk_size = (sector_t)rs.resize_size;
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&mdev->tconn->conf_update);
		synchronize_rcu();
		kfree(old_disk_conf);
	}

	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

fail:
	drbd_adm_finish(info, retcode);
	return 0;

fail_ldev:
	put_ldev(mdev);
	goto fail;
}

int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;
	tconn = adm_ctx.tconn;

	res_opts = tconn->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	err = set_resource_options(tconn, &res_opts);
	if (err) {
		retcode = ERR_INVALID_REQUEST;
		if (err == -ENOMEM)
			retcode = ERR_NOMEM;
	}

fail:
	drbd_adm_finish(info, retcode);
	return 0;
}
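
/* drbd_adm_invalidate() - start a resync with the local disk as sync
 * target; while disconnected, this degrades to simply marking the local
 * disk D_INCONSISTENT. */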
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->tconn->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
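
/* drbd_bmio_set_susp_al() - bitmap IO worker for invalidate-peer: set all
 * bits in the bitmap and suspend the activity log. */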
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* drbd_ret_code, drbd_state_rv */
	struct drbd_conf *mdev;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
	if (retcode < SS_SUCCESS) {
		if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
			/* The peer will get a resync upon connect anyways.
			 * Just make that into a full resync. */
			retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
			if (retcode >= SS_SUCCESS) {
				if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
					"set_n_write from invalidate_peer",
					BM_LOCKED_SET_ALLOWED))
					retcode = ERR_IO_MD_DISK;
			}
		} else
			retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}

int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED &&
	    nla_put_u32(skb, T_ctx_volume, vnr))
		goto nla_put_failure;
	if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
		goto nla_put_failure;
	if (tconn->my_addr_len &&
	    nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
		goto nla_put_failure;
	if (tconn->peer_addr_len &&
	    nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
		goto nla_put_failure;
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}
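
/* nla_put_status_info() - fill @skb with the status of @mdev.  With a
 * non-NULL @sib this serves a broadcast event anyone may listen to, so
 * sensitive configuration items (e.g. the shared secret) are excluded. */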
int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct net_conf *nc;
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev)
		if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
			goto nla_put_failure;

	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
	    nla_put_u32(skb, T_current_state, mdev->state.i) ||
	    nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
	    nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)))
		goto nla_put_failure;

	if (got_ldev) {
		if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
		    nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid) ||
		    nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
		    nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
			goto nla_put_failure;
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
			    nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
				goto nla_put_failure;
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
			    nla_put_u32(skb, T_new_state, sib->ns.i))
				goto nla_put_failure;
			break;
		case SIB_HELPER_POST:
			if (nla_put_u32(skb, T_helper_exit_code,
					sib->helper_exit_code))
				goto nla_put_failure;
			/* fall through */
		case SIB_HELPER_PRE:
			if (nla_put_string(skb, T_helper, sib->helper_name))
				goto nla_put_failure;
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
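
/* get_one_status() - netlink dump callback; emits the status of one
 * volume per call, revalidating the tconn and volume iterators kept in
 * cb->args[] under rcu_read_lock on each invocation. */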
int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	rcu_read_lock();
	/* revalidate iterator position */
	list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry_rcu(tconn->all_tconn.next,
					     struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;
		if (!mdev) {
			/* This is a tconn without a single volume.
			 * Surprisingly enough, it may have a network
			 * configuration. */
			struct net_conf *nc;
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
				goto cancel;
			nc = rcu_dereference(tconn->net_conf);
			if (nc && net_conf_to_skb(skb, nc, 1) != 0)
				goto cancel;
			goto done;
		}
		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
cancel:
			genlmsg_cancel(skb, dh);
			goto out;
		}
done:
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}

/*
 * Request status of all resources, or of all volumes within a single
 * resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock().  During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_tconn *tconn;
	int maxtype;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	resource_name = nla_data(nla);
	tconn = conn_get_by_name(resource_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}

int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		/* resume from last known position, if possible */
		struct start_ov_parms parms =
			{ .ov_start_sector = mdev->ov_start_sector };
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
		/* w_make_ov_request expects position to be aligned;
		 * round down to a multiple of BM_SECT_PER_BIT
		 * ("& ~BM_SECT_PER_BIT" would only clear a single bit). */
		mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	}

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	drbd_resume_io(mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
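
/* drbd_adm_new_c_uuid() - rotate in a new current UUID; with clear_bm
 * set, also clear the bitmap, which on a freshly created, connected pair
 * skips the initial full sync. */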
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_ret_code
drbd_check_resource_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(adm_ctx.resource_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.resource_name, &res_opts))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (dh->minor > MINORMASK) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
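
/*
 * Core of minor deletion, shared by drbd_adm_delete_minor() and
 * drbd_adm_down(): only a Diskless Secondary may be deleted, but it need not
 * be StandAlone, so a volume can be removed from a live replication group.
 * The synchronize_rcu() below lets any concurrent readers of the idr trees
 * drain before the final kref is dropped.
 */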
static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		_drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		idr_remove(&minors, mdev_to_minor(mdev));
		del_gendisk(mdev->vdisk);
		synchronize_rcu();
		kref_put(&mdev->kref, &drbd_minor_destroy);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}
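
/* Netlink entry point for deleting a single minor; a thin wrapper around
 * adm_delete_minor() above. */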
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_delete_minor(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
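
/*
 * Handler for the "down" netlink command: tear down a whole resource in
 * order: demote all volumes to Secondary, disconnect, detach all disks,
 * delete all volumes, then delete the connection itself.  Everything that
 * can legitimately fail is attempted before the worker thread is stopped;
 * past that point the remaining deletions are not expected to fail.
 */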
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_RES_NOT_KNOWN;
		goto out;
	}

	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out;
		}
	}

	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev, 0);
		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
			drbd_msg_put_info("failed to detach");
			goto out;
		}
	}

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured.  Make sure all threads have
	 * actually stopped; state handling only does drbd_thread_stop_nowait(). */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_RES_IN_USE;
		drbd_msg_put_info("failed to delete connection");
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
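
/*
 * Handler for the "delete resource" netlink command: delete a resource that
 * has no minors left (conn_lowest_minor() < 0).  The adm_ctx presumably still
 * holds its own reference on the tconn (taken in drbd_adm_prepare(), dropped
 * again in drbd_adm_finish()), so stopping the worker after the kref_put()
 * here should remain safe.
 */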
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		retcode = ERR_RES_IN_USE;
	}

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
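
/*
 * Broadcast a state change (struct sib_info) as a DRBD_EVENT generic netlink
 * message to the events multicast group.  -ESRCH from the multicast send
 * merely means "no listeners" and is deliberately not treated as an error.
 * A rough sketch of a userspace consumer (names assumed, not part of this
 * file): with libnl, resolve the multicast group of the drbd genl family via
 * genl_ctrl_resolve_grp(), join it with nl_socket_add_membership(), then
 * receive events in a loop with nl_recvmsgs().
 */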
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}