
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
    /* assigned from drbd_genlmsghdr */
    unsigned int minor;
    /* assigned from request attributes, if present */
    unsigned int volume;
#define VOLUME_UNSPECIFIED (-1U)
    /* pointer into the request skb,
     * limited lifetime! */
    char *resource_name;
    struct nlattr *my_addr;
    struct nlattr *peer_addr;

    /* reply buffer */
    struct sk_buff *reply_skb;
    /* pointer into reply buffer */
    struct drbd_genlmsghdr *reply_dh;
    /* resolved from attributes, if possible */
    struct drbd_conf *mdev;
    struct drbd_tconn *tconn;
} adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
    genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
    if (genlmsg_reply(skb, info))
        printk(KERN_ERR "drbd: error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail is lack of space in the skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
    struct sk_buff *skb = adm_ctx.reply_skb;
    struct nlattr *nla;
    int err = -EMSGSIZE;

    if (!info || !info[0])
        return 0;

    nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
    if (!nla)
        return err;

    err = nla_put_string(skb, T_info_text, info);
    if (err) {
        nla_nest_cancel(skb, nla);
        return err;
    } else
        nla_nest_end(skb, nla);
    return 0;
}
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR      1
#define DRBD_ADM_NEED_RESOURCE   2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
                            unsigned flags)
{
    struct drbd_genlmsghdr *d_in = info->userhdr;
    const u8 cmd = info->genlhdr->cmd;
    int err;

    memset(&adm_ctx, 0, sizeof(adm_ctx));

    /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
    if (cmd != DRBD_ADM_GET_STATUS
    && security_netlink_recv(skb, CAP_SYS_ADMIN))
        return -EPERM;

    adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
    if (!adm_ctx.reply_skb) {
        err = -ENOMEM;
        goto fail;
    }

    adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
                                         info, &drbd_genl_family, 0, cmd);
    /* a put of a few bytes into a fresh skb of >= 4k will always succeed,
     * but check anyway */
    if (!adm_ctx.reply_dh) {
        err = -ENOMEM;
        goto fail;
    }

    adm_ctx.reply_dh->minor = d_in->minor;
    adm_ctx.reply_dh->ret_code = NO_ERROR;

    adm_ctx.volume = VOLUME_UNSPECIFIED;
    if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
        struct nlattr *nla;
        /* parse and validate only */
        err = drbd_cfg_context_from_attrs(NULL, info);
        if (err)
            goto fail;

        /* It was present, and valid,
         * copy it over to the reply skb. */
        err = nla_put_nohdr(adm_ctx.reply_skb,
                            info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
                            info->attrs[DRBD_NLA_CFG_CONTEXT]);
        if (err)
            goto fail;

        /* and assign stuff to the global adm_ctx */
        nla = nested_attr_tb[__nla_type(T_ctx_volume)];
        if (nla)
            adm_ctx.volume = nla_get_u32(nla);
        nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
        if (nla)
            adm_ctx.resource_name = nla_data(nla);
        adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
        adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
        if ((adm_ctx.my_addr &&
             nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
            (adm_ctx.peer_addr &&
             nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
            err = -EINVAL;
            goto fail;
        }
    }

    adm_ctx.minor = d_in->minor;
    adm_ctx.mdev = minor_to_mdev(d_in->minor);
    adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

    if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
        drbd_msg_put_info("unknown minor");
        return ERR_MINOR_INVALID;
    }
    if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
        drbd_msg_put_info("unknown resource");
        return ERR_INVALID_REQUEST;
    }

    if (flags & DRBD_ADM_NEED_CONNECTION) {
        if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
            drbd_msg_put_info("no resource name expected");
            return ERR_INVALID_REQUEST;
        }
        if (adm_ctx.mdev) {
            drbd_msg_put_info("no minor number expected");
            return ERR_INVALID_REQUEST;
        }
        if (adm_ctx.my_addr && adm_ctx.peer_addr)
            adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
                                              nla_len(adm_ctx.my_addr),
                                              nla_data(adm_ctx.peer_addr),
                                              nla_len(adm_ctx.peer_addr));
        if (!adm_ctx.tconn) {
            drbd_msg_put_info("unknown connection");
            return ERR_INVALID_REQUEST;
        }
    }

    /* some more paranoia, if the request was over-determined */
    if (adm_ctx.mdev && adm_ctx.tconn &&
        adm_ctx.mdev->tconn != adm_ctx.tconn) {
        pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
                   adm_ctx.minor, adm_ctx.resource_name,
                   adm_ctx.mdev->tconn->name);
        drbd_msg_put_info("minor exists in different resource");
        return ERR_INVALID_REQUEST;
    }
    if (adm_ctx.mdev &&
        adm_ctx.volume != VOLUME_UNSPECIFIED &&
        adm_ctx.volume != adm_ctx.mdev->vnr) {
        pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
                   adm_ctx.minor, adm_ctx.volume,
                   adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
        drbd_msg_put_info("minor exists as different volume");
        return ERR_INVALID_REQUEST;
    }

    return NO_ERROR;

fail:
    nlmsg_free(adm_ctx.reply_skb);
    adm_ctx.reply_skb = NULL;
    return err;
}
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
    if (adm_ctx.tconn) {
        kref_put(&adm_ctx.tconn->kref, &conn_destroy);
        adm_ctx.tconn = NULL;
    }

    if (!adm_ctx.reply_skb)
        return -ENOMEM;

    adm_ctx.reply_dh->ret_code = retcode;
    drbd_adm_send_reply(adm_ctx.reply_skb, info);
    return 0;
}
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
    char *afs;

    /* FIXME: A future version will not allow this case. */
    if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
        return;

    switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
    case AF_INET6:
        afs = "ipv6";
        snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
                 &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
        break;
    case AF_INET:
        afs = "ipv4";
        snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
                 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
        break;
    default:
        afs = "ssocks";
        snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
                 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
    }
    snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
    char *envp[] = { "HOME=/",
                     "TERM=linux",
                     "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                     (char[20]) { }, /* address family */
                     (char[60]) { }, /* address */
                     NULL };
    char mb[12];
    char *argv[] = {usermode_helper, cmd, mb, NULL };
    struct sib_info sib;
    int ret;

    snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
    setup_khelper_env(mdev->tconn, envp);

    /* The helper may take some time.
     * write out any unsynced meta data changes now */
    drbd_md_sync(mdev);

    dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
    sib.sib_reason = SIB_HELPER_PRE;
    sib.helper_name = cmd;
    drbd_bcast_event(mdev, &sib);
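    /* The last argument (1) corresponds to UMH_WAIT_PROC in kernels of
     * this era: block until the helper process has exited, so that its
     * exit status can be evaluated below. */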
    ret = call_usermodehelper(usermode_helper, argv, envp, 1);
    if (ret)
        dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
                 usermode_helper, cmd, mb,
                 (ret >> 8) & 0xff, ret);
    else
        dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
                 usermode_helper, cmd, mb,
                 (ret >> 8) & 0xff, ret);
    sib.sib_reason = SIB_HELPER_POST;
    sib.helper_exit_code = ret;
    drbd_bcast_event(mdev, &sib);

    if (ret < 0) /* Ignore any ERRNOs we got. */
        ret = 0;

    return ret;
}
static void conn_md_sync(struct drbd_tconn *tconn)
{
    struct drbd_conf *mdev;
    int vnr;

    rcu_read_lock();
    idr_for_each_entry(&tconn->volumes, mdev, vnr) {
        kref_get(&mdev->kref);
        rcu_read_unlock();
        drbd_md_sync(mdev);
        kref_put(&mdev->kref, &drbd_minor_destroy);
        rcu_read_lock();
    }
    rcu_read_unlock();
}
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
    char *envp[] = { "HOME=/",
                     "TERM=linux",
                     "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                     (char[20]) { }, /* address family */
                     (char[60]) { }, /* address */
                     NULL };
    char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
    int ret;

    setup_khelper_env(tconn, envp);
    conn_md_sync(tconn);

    conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
    /* TODO: conn_bcast_event() ?? */

    ret = call_usermodehelper(usermode_helper, argv, envp, 1);
    if (ret)
        conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
                  usermode_helper, cmd, tconn->name,
                  (ret >> 8) & 0xff, ret);
    else
        conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
                  usermode_helper, cmd, tconn->name,
                  (ret >> 8) & 0xff, ret);
    /* TODO: conn_bcast_event() ?? */

    if (ret < 0) /* Ignore any ERRNOs we got. */
        ret = 0;

    return ret;
}
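/* Note: this assumes enum drbd_fencing_p orders policies by strictness
 * (FP_DONT_CARE < FP_RESOURCE < FP_STONITH), so the max_t() below yields
 * the strictest fencing policy over all volumes that are at least
 * Consistent. */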
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
    enum drbd_fencing_p fp = FP_NOT_AVAIL;
    struct drbd_conf *mdev;
    int vnr;

    rcu_read_lock();
    idr_for_each_entry(&tconn->volumes, mdev, vnr) {
        if (get_ldev_if_state(mdev, D_CONSISTENT)) {
            fp = max_t(enum drbd_fencing_p, fp,
                       rcu_dereference(mdev->ldev->disk_conf)->fencing);
            put_ldev(mdev);
        }
    }
    rcu_read_unlock();

    return fp;
}
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
    union drbd_state mask = { };
    union drbd_state val = { };
    enum drbd_fencing_p fp;
    char *ex_to_string;
    int r;

    if (tconn->cstate >= C_WF_REPORT_PARAMS) {
        conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
        return false;
    }

    fp = highest_fencing_policy(tconn);
    switch (fp) {
    case FP_NOT_AVAIL:
        conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
        goto out;
    case FP_DONT_CARE:
        return true;
    default: ;
    }

    r = conn_khelper(tconn, "fence-peer");
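    /* (r >> 8) & 0xff extracts the helper's exit status, using the same
     * encoding as wait(2); the case labels below are the fence-peer
     * handler's exit codes. */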
    switch ((r>>8) & 0xff) {
    case 3: /* peer is inconsistent */
        ex_to_string = "peer is inconsistent or worse";
        mask.pdsk = D_MASK;
        val.pdsk = D_INCONSISTENT;
        break;
    case 4: /* peer got outdated, or was already outdated */
        ex_to_string = "peer was fenced";
        mask.pdsk = D_MASK;
        val.pdsk = D_OUTDATED;
        break;
    case 5: /* peer was down */
        if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
            /* we will(have) create(d) a new UUID anyway... */
            ex_to_string = "peer is unreachable, assumed to be dead";
            mask.pdsk = D_MASK;
            val.pdsk = D_OUTDATED;
        } else {
            ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
        }
        break;
    case 6: /* Peer is primary, voluntarily outdate myself.
             * This is useful when an unconnected R_SECONDARY is asked to
             * become R_PRIMARY, but finds the other peer being active. */
        ex_to_string = "peer is active";
        conn_warn(tconn, "Peer is primary, outdating myself.\n");
        mask.disk = D_MASK;
        val.disk = D_OUTDATED;
        break;
    case 7:
        if (fp != FP_STONITH)
            conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
        ex_to_string = "peer was stonithed";
        mask.pdsk = D_MASK;
        val.pdsk = D_OUTDATED;
        break;
    default:
        /* The script is broken ... */
        conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
        return false; /* Eventually leave IO frozen */
    }

    conn_info(tconn, "fence-peer helper returned %d (%s)\n",
              (r>>8) & 0xff, ex_to_string);

out:
    /* Not using
       conn_request_state(tconn, mask, val, CS_VERBOSE);
       here, because we might have been able to re-establish the
       connection in the meantime. */
    spin_lock_irq(&tconn->req_lock);
    if (tconn->cstate < C_WF_REPORT_PARAMS)
        _conn_request_state(tconn, mask, val, CS_VERBOSE);
    spin_unlock_irq(&tconn->req_lock);

    return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
    struct drbd_tconn *tconn = (struct drbd_tconn *)data;

    conn_try_outdate_peer(tconn);

    kref_put(&tconn->kref, &conn_destroy);
    return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
    struct task_struct *opa;

    kref_get(&tconn->kref);
    opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
    if (IS_ERR(opa)) {
        conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
        kref_put(&tconn->kref, &conn_destroy);
    }
}
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
    const int max_tries = 4;
    enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
    struct net_conf *nc;
    int try = 0;
    int forced = 0;
    union drbd_state mask, val;

    if (new_role == R_PRIMARY)
        request_ping(mdev->tconn); /* Detect a dead peer ASAP */

    mutex_lock(mdev->state_mutex);

    mask.i = 0; mask.role = R_MASK;
    val.i  = 0; val.role  = new_role;

    while (try++ < max_tries) {
        rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

        /* in case we first succeeded to outdate,
         * but now suddenly could establish a connection */
        if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
            val.pdsk = 0;
            mask.pdsk = 0;
            continue;
        }

        if (rv == SS_NO_UP_TO_DATE_DISK && force &&
            (mdev->state.disk < D_UP_TO_DATE &&
             mdev->state.disk >= D_INCONSISTENT)) {
            mask.disk = D_MASK;
            val.disk = D_UP_TO_DATE;
            forced = 1;
            continue;
        }

        if (rv == SS_NO_UP_TO_DATE_DISK &&
            mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
            D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

            if (conn_try_outdate_peer(mdev->tconn)) {
                val.disk = D_UP_TO_DATE;
                mask.disk = D_MASK;
            }
            continue;
        }

        if (rv == SS_NOTHING_TO_DO)
            goto out;
        if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
            if (!conn_try_outdate_peer(mdev->tconn) && force) {
                dev_warn(DEV, "Forced into split brain situation!\n");
                mask.pdsk = D_MASK;
                val.pdsk = D_OUTDATED;
            }
            continue;
        }
        if (rv == SS_TWO_PRIMARIES) {
            /* Maybe the peer is detected as dead very soon...
               retry at most once more in this case. */
            int timeo;
            rcu_read_lock();
            nc = rcu_dereference(mdev->tconn->net_conf);
            timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
            rcu_read_unlock();
            schedule_timeout_interruptible(timeo);
            if (try < max_tries)
                try = max_tries - 1;
            continue;
        }
        if (rv < SS_SUCCESS) {
            rv = _drbd_request_state(mdev, mask, val,
                                     CS_VERBOSE + CS_WAIT_COMPLETE);
            if (rv < SS_SUCCESS)
                goto out;
        }
        break;
    }

    if (rv < SS_SUCCESS)
        goto out;

    if (forced)
        dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

    /* Wait until nothing is on the fly :) */
    wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

    if (new_role == R_SECONDARY) {
        set_disk_ro(mdev->vdisk, true);
        if (get_ldev(mdev)) {
            mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
            put_ldev(mdev);
        }
    } else {
        mutex_lock(&mdev->tconn->conf_update);
        nc = mdev->tconn->net_conf;
        if (nc)
            nc->discard_my_data = 0; /* without copy; single bit op is atomic */
        mutex_unlock(&mdev->tconn->conf_update);

        set_disk_ro(mdev->vdisk, false);
        if (get_ldev(mdev)) {
            if (((mdev->state.conn < C_CONNECTED ||
                  mdev->state.pdsk <= D_FAILED)
                 && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
                drbd_uuid_new_current(mdev);

            mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
            put_ldev(mdev);
        }
    }

    /* writeout of activity log covered areas of the bitmap
     * to stable storage done in after state change already */

    if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
        /* if this was forced, we should consider sync */
        if (forced)
            drbd_send_uuids(mdev);
        drbd_send_state(mdev);
    }

    drbd_md_sync(mdev);

    kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
    mutex_unlock(mdev->state_mutex);
    return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
    return err == -ENOMSG ? "required attribute missing" :
           err == -EOPNOTSUPP ? "unknown mandatory attribute" :
           err == -EEXIST ? "can not change invariant setting" :
           "invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
    struct set_role_parms parms;
    int err;
    enum drbd_ret_code retcode;

    retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
    if (!adm_ctx.reply_skb)
        return retcode;
    if (retcode != NO_ERROR)
        goto out;

    memset(&parms, 0, sizeof(parms));
    if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
        err = set_role_parms_from_attrs(&parms, info);
        if (err) {
            retcode = ERR_MANDATORY_TAG;
            drbd_msg_put_info(from_attrs_err_to_txt(err));
            goto out;
        }
    }

    if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
        retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
    else
        retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
    drbd_adm_finish(info, retcode);
    return 0;
}
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
                                       struct drbd_backing_dev *bdev)
{
    sector_t md_size_sect = 0;
    int meta_dev_idx;

    rcu_read_lock();
    meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;

    switch (meta_dev_idx) {
    default:
        /* v07 style fixed size indexed meta data */
        bdev->md.md_size_sect = MD_RESERVED_SECT;
        bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
        bdev->md.al_offset = MD_AL_OFFSET;
        bdev->md.bm_offset = MD_BM_OFFSET;
        break;
    case DRBD_MD_INDEX_FLEX_EXT:
        /* just occupy the full device; unit: sectors */
        bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
        bdev->md.md_offset = 0;
        bdev->md.al_offset = MD_AL_OFFSET;
        bdev->md.bm_offset = MD_BM_OFFSET;
        break;
    case DRBD_MD_INDEX_INTERNAL:
    case DRBD_MD_INDEX_FLEX_INT:
        bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
        /* al size is still fixed */
        bdev->md.al_offset = -MD_AL_SECTORS;
        /* we need (slightly less than) ~ this much bitmap sectors: */
        md_size_sect = drbd_get_capacity(bdev->backing_bdev);
        md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
        md_size_sect = BM_SECT_TO_EXT(md_size_sect);
        md_size_sect = ALIGN(md_size_sect, 8);

        /* plus the "drbd meta data super block",
         * and the activity log; */
        md_size_sect += MD_BM_OFFSET;

        bdev->md.md_size_sect = md_size_sect;
        /* bitmap offset is adjusted by 'super' block size */
        bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
        break;
    }
    rcu_read_unlock();
}
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
    /* Needs 9 bytes at max including trailing NUL:
     * -1ULL ==> "16384 EB" */
    static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
    int base = 0;
    while (size >= 10000 && base < sizeof(units)-1) {
        /* shift + round */
        size = (size >> 10) + !!(size & (1<<9));
        base++;
    }
    sprintf(buf, "%u %cB", (unsigned)size, units[base]);

    return buf;
}
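/* Example: ppsize(buf, 1048576) repeatedly divides by 1024 (with rounding)
 * until the value drops below 10000, so 1048576 KB prints as "1024 MB",
 * while e.g. 9999 KB still prints as "9999 KB". */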
/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
    set_bit(SUSPEND_IO, &mdev->flags);
    if (drbd_suspended(mdev))
        return;
    wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
    clear_bit(SUSPEND_IO, &mdev->flags);
    wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
    sector_t prev_first_sect, prev_size; /* previous meta location */
    sector_t la_size, u_size;
    sector_t size;
    char ppb[10];

    int md_moved, la_size_changed;
    enum determine_dev_size rv = unchanged;

    /* race:
     * application request passes inc_ap_bio,
     * but then cannot get an AL-reference.
     * this function later may wait on ap_bio_cnt == 0. -> deadlock.
     *
     * to avoid that:
     * Suspend IO right here.
     * still lock the act_log to not trigger ASSERTs there.
     */
    drbd_suspend_io(mdev);

    /* no wait necessary anymore, actually we could assert that */
    wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

    prev_first_sect = drbd_md_first_sector(mdev->ldev);
    prev_size = mdev->ldev->md.md_size_sect;
    la_size = mdev->ldev->md.la_size_sect;

    /* TODO: should only be some assert here, not (re)init... */
    drbd_md_set_sector_offsets(mdev, mdev->ldev);

    rcu_read_lock();
    u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
    rcu_read_unlock();
    size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

    if (drbd_get_capacity(mdev->this_bdev) != size ||
        drbd_bm_capacity(mdev) != size) {
        int err;
        err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
        if (unlikely(err)) {
            /* currently there is only one error: ENOMEM! */
            size = drbd_bm_capacity(mdev)>>1;
            if (size == 0) {
                dev_err(DEV, "OUT OF MEMORY! "
                        "Could not allocate bitmap!\n");
            } else {
                dev_err(DEV, "BM resizing failed. "
                        "Leaving size unchanged at size = %lu KB\n",
                        (unsigned long)size);
            }
            rv = dev_size_error;
        }
        /* racy, see comments above. */
        drbd_set_my_capacity(mdev, size);
        mdev->ldev->md.la_size_sect = size;
        dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
                 (unsigned long long)size>>1);
    }
    if (rv == dev_size_error)
        goto out;

    la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

    md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
            || prev_size != mdev->ldev->md.md_size_sect;

    if (la_size_changed || md_moved) {
        int err;

        drbd_al_shrink(mdev); /* All extents inactive. */
        dev_info(DEV, "Writing the whole bitmap, %s\n",
                 la_size_changed && md_moved ? "size changed and md moved" :
                 la_size_changed ? "size changed" : "md moved");
        /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
        err = drbd_bitmap_io(mdev, &drbd_bm_write,
                             "size changed", BM_LOCKED_MASK);
        if (err) {
            rv = dev_size_error;
            goto out;
        }
        drbd_md_mark_dirty(mdev);
    }

    if (size > la_size)
        rv = grew;
    if (size < la_size)
        rv = shrunk;
out:
    lc_unlock(mdev->act_log);
    wake_up(&mdev->al_wait);
    drbd_resume_io(mdev);

    return rv;
}
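/* Size negotiation, in short: if both the peer's and the local size are
 * known, use the minimum of the two.  Otherwise fall back to the last
 * agreed size, capped by whichever size is known; lacking a last agreed
 * size, use whichever size is known.  An explicit user-configured size
 * (u_size) overrides the result, but is rejected if it exceeds the
 * computed maximum. */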
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
                  sector_t u_size, int assume_peer_has_space)
{
    sector_t p_size = mdev->p_size; /* partner's disk size. */
    sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
    sector_t m_size; /* my size */
    sector_t size = 0;

    m_size = drbd_get_max_capacity(bdev);

    if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
        dev_warn(DEV, "Resize while not connected was forced by the user!\n");
        p_size = m_size;
    }

    if (p_size && m_size) {
        size = min_t(sector_t, p_size, m_size);
    } else {
        if (la_size) {
            size = la_size;
            if (m_size && m_size < size)
                size = m_size;
            if (p_size && p_size < size)
                size = p_size;
        } else {
            if (m_size)
                size = m_size;
            if (p_size)
                size = p_size;
        }
    }

    if (size == 0)
        dev_err(DEV, "Both nodes diskless!\n");

    if (u_size) {
        if (u_size > size)
            dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
                    (unsigned long)u_size>>1, (unsigned long)size>>1);
        else
            size = u_size;
    }

    return size;
}
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
    struct lru_cache *n, *t;
    struct lc_element *e;
    unsigned int in_use;
    int i;

    if (mdev->act_log &&
        mdev->act_log->nr_elements == dc->al_extents)
        return 0;

    in_use = 0;
    t = mdev->act_log;
    n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
                  dc->al_extents, sizeof(struct lc_element), 0);

    if (n == NULL) {
        dev_err(DEV, "Cannot allocate act_log lru!\n");
        return -ENOMEM;
    }
    spin_lock_irq(&mdev->al_lock);
    if (t) {
        for (i = 0; i < t->nr_elements; i++) {
            e = lc_element_by_index(t, i);
            if (e->refcnt)
                dev_err(DEV, "refcnt(%d)==%d\n",
                        e->lc_number, e->refcnt);
            in_use += e->refcnt;
        }
    }
    if (!in_use)
        mdev->act_log = n;
    spin_unlock_irq(&mdev->al_lock);
    if (in_use) {
        dev_err(DEV, "Activity log still in use!\n");
        lc_destroy(n);
        return -EBUSY;
    } else {
        if (t)
            lc_destroy(t);
    }
    drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
    return 0;
}
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
    struct request_queue * const q = mdev->rq_queue;
    int max_hw_sectors = max_bio_size >> 9;
    int max_segments = 0;

    if (get_ldev_if_state(mdev, D_ATTACHING)) {
        struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

        max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
        rcu_read_lock();
        max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
        rcu_read_unlock();
        put_ldev(mdev);
    }

    blk_queue_logical_block_size(q, 512);
    blk_queue_max_hw_sectors(q, max_hw_sectors);
    /* This is the workaround for "bio would need to, but cannot, be split" */
    blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
    blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

    if (get_ldev_if_state(mdev, D_ATTACHING)) {
        struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

        blk_queue_stack_limits(q, b);

        if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
            dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
                     q->backing_dev_info.ra_pages,
                     b->backing_dev_info.ra_pages);
            q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
        }
        put_ldev(mdev);
    }
}
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
    int now, new, local, peer;

    now = queue_max_hw_sectors(mdev->rq_queue) << 9;
    local = mdev->local_max_bio_size; /* possibly the last known value, from volatile memory */
    peer = mdev->peer_max_bio_size; /* possibly the last known value, from meta data */

    if (get_ldev_if_state(mdev, D_ATTACHING)) {
        local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
        mdev->local_max_bio_size = local;
        put_ldev(mdev);
    }

    /* We may ignore peer limits if the peer is modern enough:
       from 8.3.8 onwards the peer can use multiple
       BIOs for a single peer_request. */
    if (mdev->state.conn >= C_CONNECTED) {
        if (mdev->tconn->agreed_pro_version < 94)
            peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
            /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
        else if (mdev->tconn->agreed_pro_version == 94)
            peer = DRBD_MAX_SIZE_H80_PACKET;
        else if (mdev->tconn->agreed_pro_version < 100)
            peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
        else
            peer = DRBD_MAX_BIO_SIZE;
    }

    new = min_t(int, local, peer);

    if (mdev->state.role == R_PRIMARY && new < now)
        dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

    if (new != now)
        dev_info(DEV, "max BIO size = %u\n", new);

    drbd_setup_queue_param(mdev, new);
}
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
    drbd_thread_start(&tconn->worker);
    conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
    bool stop_threads;
    spin_lock_irq(&tconn->req_lock);
    stop_threads = conn_all_vols_unconf(tconn);
    spin_unlock_irq(&tconn->req_lock);
    if (stop_threads) {
        /* asender is implicitly stopped by receiver
         * in conn_disconnect() */
        drbd_thread_stop(&tconn->receiver);
        drbd_thread_stop(&tconn->worker);
    }
}
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
    int s = 0;

    if (!lc_try_lock(mdev->act_log)) {
        dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
        return;
    }

    drbd_al_shrink(mdev);
    spin_lock_irq(&mdev->tconn->req_lock);
    if (mdev->state.conn < C_CONNECTED)
        s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
    spin_unlock_irq(&mdev->tconn->req_lock);
    lc_unlock(mdev->act_log);

    if (s)
        dev_info(DEV, "Suspended AL updates\n");
}
static bool should_set_defaults(struct genl_info *info)
{
    unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
    return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static void enforce_disk_conf_limits(struct disk_conf *dc)
{
    if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
        dc->al_extents = DRBD_AL_EXTENTS_MIN;
    if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
        dc->al_extents = DRBD_AL_EXTENTS_MAX;

    if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
        dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
}
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
    enum drbd_ret_code retcode;
    struct drbd_conf *mdev;
    struct disk_conf *new_disk_conf, *old_disk_conf;
    struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
    int err, fifo_size;

    retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
    if (!adm_ctx.reply_skb)
        return retcode;
    if (retcode != NO_ERROR)
        goto out;

    mdev = adm_ctx.mdev;

    /* we also need a disk
     * to change the options on */
    if (!get_ldev(mdev)) {
        retcode = ERR_NO_DISK;
        goto out;
    }

    new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
    if (!new_disk_conf) {
        retcode = ERR_NOMEM;
        goto fail;
    }

    mutex_lock(&mdev->tconn->conf_update);
    old_disk_conf = mdev->ldev->disk_conf;
    *new_disk_conf = *old_disk_conf;
    if (should_set_defaults(info))
        set_disk_conf_defaults(new_disk_conf);

    err = disk_conf_from_attrs_for_change(new_disk_conf, info);
    if (err && err != -ENOMSG) {
        retcode = ERR_MANDATORY_TAG;
        drbd_msg_put_info(from_attrs_err_to_txt(err));
    }

    if (!expect(new_disk_conf->resync_rate >= 1))
        new_disk_conf->resync_rate = 1;

    enforce_disk_conf_limits(new_disk_conf);

    fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
    if (fifo_size != mdev->rs_plan_s->size) {
        new_plan = fifo_alloc(fifo_size);
        if (!new_plan) {
            dev_err(DEV, "kmalloc of fifo_buffer failed");
            retcode = ERR_NOMEM;
            goto fail_unlock;
        }
    }

    wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
    drbd_al_shrink(mdev);
    err = drbd_check_al_size(mdev, new_disk_conf);
    lc_unlock(mdev->act_log);
    wake_up(&mdev->al_wait);

    if (err) {
        retcode = ERR_NOMEM;
        goto fail_unlock;
    }

    write_lock_irq(&global_state_lock);
    retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
    if (retcode == NO_ERROR) {
        rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
        drbd_resync_after_changed(mdev);
    }
    write_unlock_irq(&global_state_lock);

    if (retcode != NO_ERROR)
        goto fail_unlock;

    if (new_plan) {
        old_plan = mdev->rs_plan_s;
        rcu_assign_pointer(mdev->rs_plan_s, new_plan);
    }

    mutex_unlock(&mdev->tconn->conf_update);
    drbd_md_sync(mdev);

    if (mdev->state.conn >= C_CONNECTED)
        drbd_send_sync_param(mdev);

    synchronize_rcu();
    kfree(old_disk_conf);
    kfree(old_plan);
    mod_timer(&mdev->request_timer, jiffies + HZ);
    goto success;

fail_unlock:
    mutex_unlock(&mdev->tconn->conf_update);
fail:
    kfree(new_disk_conf);
    kfree(new_plan);
success:
    put_ldev(mdev);
out:
    drbd_adm_finish(info, retcode);
    return 0;
}
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
    struct drbd_conf *mdev;
    int err;
    enum drbd_ret_code retcode;
    enum determine_dev_size dd;
    sector_t max_possible_sectors;
    sector_t min_md_device_sectors;
    struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
    struct disk_conf *new_disk_conf = NULL;
    struct block_device *bdev;
    struct lru_cache *resync_lru = NULL;
    struct fifo_buffer *new_plan = NULL;
    union drbd_state ns, os;
    enum drbd_state_rv rv;
    struct net_conf *nc;

    retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
    if (!adm_ctx.reply_skb)
        return retcode;
    if (retcode != NO_ERROR)
        goto finish;

    mdev = adm_ctx.mdev;
    conn_reconfig_start(mdev->tconn);

    /* if you want to reconfigure, please tear down first */
    if (mdev->state.disk > D_DISKLESS) {
        retcode = ERR_DISK_CONFIGURED;
        goto fail;
    }
    /* It may just now have detached because of IO error. Make sure
     * drbd_ldev_destroy is done already, we may end up here very fast,
     * e.g. if someone calls attach from the on-io-error handler,
     * to realize a "hot spare" feature (not that I'd recommend that) */
    wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

    /* allocation not in the IO path, drbdsetup context */
    nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
    if (!nbc) {
        retcode = ERR_NOMEM;
        goto fail;
    }
    new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
    if (!new_disk_conf) {
        retcode = ERR_NOMEM;
        goto fail;
    }
    nbc->disk_conf = new_disk_conf;

    set_disk_conf_defaults(new_disk_conf);
    err = disk_conf_from_attrs(new_disk_conf, info);
    if (err) {
        retcode = ERR_MANDATORY_TAG;
        drbd_msg_put_info(from_attrs_err_to_txt(err));
        goto fail;
    }

    enforce_disk_conf_limits(new_disk_conf);

    new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
    if (!new_plan) {
        retcode = ERR_NOMEM;
        goto fail;
    }

    if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
        retcode = ERR_MD_IDX_INVALID;
        goto fail;
    }

    rcu_read_lock();
    nc = rcu_dereference(mdev->tconn->net_conf);
    if (nc) {
        if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
            rcu_read_unlock();
            retcode = ERR_STONITH_AND_PROT_A;
            goto fail;
        }
    }
    rcu_read_unlock();

    bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
                              FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
    if (IS_ERR(bdev)) {
        dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
                PTR_ERR(bdev));
        retcode = ERR_OPEN_DISK;
        goto fail;
    }
    nbc->backing_bdev = bdev;

    /*
     * meta_dev_idx >= 0: external fixed size, possibly multiple
     * drbd sharing one meta device.  TODO in that case, paranoia
     * check that [md_bdev, meta_dev_idx] is not yet used by some
     * other drbd minor!  (if you use drbd.conf + drbdadm, that
     * should check it for you already; but if you don't, or
     * someone fooled it, we need to double check here)
     */
    bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
                              FMODE_READ | FMODE_WRITE | FMODE_EXCL,
                              (new_disk_conf->meta_dev_idx < 0) ?
                              (void *)mdev : (void *)drbd_m_holder);
    if (IS_ERR(bdev)) {
        dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
                PTR_ERR(bdev));
        retcode = ERR_OPEN_MD_DISK;
        goto fail;
    }
    nbc->md_bdev = bdev;

    if ((nbc->backing_bdev == nbc->md_bdev) !=
        (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
         new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
        retcode = ERR_MD_IDX_INVALID;
        goto fail;
    }

    resync_lru = lc_create("resync", drbd_bm_ext_cache,
                           1, 61, sizeof(struct bm_extent),
                           offsetof(struct bm_extent, lce));
    if (!resync_lru) {
        retcode = ERR_NOMEM;
        goto fail;
    }

    /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
    drbd_md_set_sector_offsets(mdev, nbc);

    if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
        dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
                (unsigned long long) drbd_get_max_capacity(nbc),
                (unsigned long long) new_disk_conf->disk_size);
        retcode = ERR_DISK_TOO_SMALL;
        goto fail;
    }

    if (new_disk_conf->meta_dev_idx < 0) {
        max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
        /* at least one MB, otherwise it does not make sense */
        min_md_device_sectors = (2<<10);
    } else {
        max_possible_sectors = DRBD_MAX_SECTORS;
        min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
    }

    if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
        retcode = ERR_MD_DISK_TOO_SMALL;
        dev_warn(DEV, "refusing attach: md-device too small, "
                 "at least %llu sectors needed for this meta-disk type\n",
                 (unsigned long long) min_md_device_sectors);
        goto fail;
    }

    /* Make sure the new disk is big enough
     * (we may currently be R_PRIMARY with no local disk...) */
    if (drbd_get_max_capacity(nbc) <
        drbd_get_capacity(mdev->this_bdev)) {
        retcode = ERR_DISK_TOO_SMALL;
        goto fail;
    }

    nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

    if (nbc->known_size > max_possible_sectors) {
        dev_warn(DEV, "==> truncating very big lower level device "
                 "to currently maximum possible %llu sectors <==\n",
                 (unsigned long long) max_possible_sectors);
        if (new_disk_conf->meta_dev_idx >= 0)
            dev_warn(DEV, "==>> using internal or flexible "
                     "meta data may help <<==\n");
    }

    drbd_suspend_io(mdev);
    /* also wait for the last barrier ack. */
    wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
    /* and for any other previously queued work */
    drbd_flush_workqueue(mdev);

    rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
    retcode = rv; /* FIXME: Type mismatch. */
    drbd_resume_io(mdev);
    if (rv < SS_SUCCESS)
        goto fail;

    if (!get_ldev_if_state(mdev, D_ATTACHING))
        goto force_diskless;

    drbd_md_set_sector_offsets(mdev, nbc);

    if (!mdev->bitmap) {
        if (drbd_bm_init(mdev)) {
            retcode = ERR_NOMEM;
            goto force_diskless_dec;
        }
    }

    retcode = drbd_md_read(mdev, nbc);
    if (retcode != NO_ERROR)
        goto force_diskless_dec;

    if (mdev->state.conn < C_CONNECTED &&
        mdev->state.role == R_PRIMARY &&
        (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
  1254. dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
  1255. (unsigned long long)mdev->ed_uuid);
  1256. retcode = ERR_DATA_NOT_CURRENT;
  1257. goto force_diskless_dec;
  1258. }
  1259. /* Since we are diskless, fix the activity log first... */
  1260. if (drbd_check_al_size(mdev, new_disk_conf)) {
  1261. retcode = ERR_NOMEM;
  1262. goto force_diskless_dec;
  1263. }
  1264. /* Prevent shrinking of consistent devices ! */
  1265. if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
  1266. drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
  1267. dev_warn(DEV, "refusing to truncate a consistent device\n");
  1268. retcode = ERR_DISK_TOO_SMALL;
  1269. goto force_diskless_dec;
  1270. }
  1271. /* Reset the "barriers don't work" bits here, then force meta data to
  1272. * be written, to ensure we determine if barriers are supported. */
  1273. if (new_disk_conf->md_flushes)
  1274. clear_bit(MD_NO_FUA, &mdev->flags);
  1275. else
  1276. set_bit(MD_NO_FUA, &mdev->flags);
  1277. /* Point of no return reached.
  1278. * Devices and memory are no longer released by error cleanup below.
  1279. * now mdev takes over responsibility, and the state engine should
  1280. * clean it up somewhere. */
  1281. D_ASSERT(mdev->ldev == NULL);
  1282. mdev->ldev = nbc;
  1283. mdev->resync = resync_lru;
  1284. mdev->rs_plan_s = new_plan;
  1285. nbc = NULL;
  1286. resync_lru = NULL;
  1287. new_disk_conf = NULL;
  1288. new_plan = NULL;
  1289. mdev->write_ordering = WO_bdev_flush;
  1290. drbd_bump_write_ordering(mdev, WO_bdev_flush);
  1291. if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
  1292. set_bit(CRASHED_PRIMARY, &mdev->flags);
  1293. else
  1294. clear_bit(CRASHED_PRIMARY, &mdev->flags);
  1295. if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
  1296. !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
  1297. set_bit(CRASHED_PRIMARY, &mdev->flags);
  1298. mdev->send_cnt = 0;
  1299. mdev->recv_cnt = 0;
  1300. mdev->read_cnt = 0;
  1301. mdev->writ_cnt = 0;
  1302. drbd_reconsider_max_bio_size(mdev);
  1303. /* If I am currently not R_PRIMARY,
  1304. * but meta data primary indicator is set,
  1305. * I just now recover from a hard crash,
  1306. * and have been R_PRIMARY before that crash.
  1307. *
  1308. * Now, if I had no connection before that crash
  1309. * (have been degraded R_PRIMARY), chances are that
  1310. * I won't find my peer now either.
  1311. *
  1312. * In that case, and _only_ in that case,
  1313. * we use the degr-wfc-timeout instead of the default,
  1314. * so we can automatically recover from a crash of a
  1315. * degraded but active "cluster" after a certain timeout.
  1316. */
  1317. clear_bit(USE_DEGR_WFC_T, &mdev->flags);
  1318. if (mdev->state.role != R_PRIMARY &&
  1319. drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
  1320. !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
  1321. set_bit(USE_DEGR_WFC_T, &mdev->flags);
  1322. dd = drbd_determine_dev_size(mdev, 0);
  1323. if (dd == dev_size_error) {
  1324. retcode = ERR_NOMEM_BITMAP;
  1325. goto force_diskless_dec;
  1326. } else if (dd == grew)
  1327. set_bit(RESYNC_AFTER_NEG, &mdev->flags);
  1328. if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
  1329. dev_info(DEV, "Assuming that all blocks are out of sync "
  1330. "(aka FullSync)\n");
  1331. if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
  1332. "set_n_write from attaching", BM_LOCKED_MASK)) {
  1333. retcode = ERR_IO_MD_DISK;
  1334. goto force_diskless_dec;
  1335. }
  1336. } else {
  1337. if (drbd_bitmap_io(mdev, &drbd_bm_read,
  1338. "read from attaching", BM_LOCKED_MASK)) {
  1339. retcode = ERR_IO_MD_DISK;
  1340. goto force_diskless_dec;
  1341. }
  1342. }
  1343. if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
  1344. drbd_suspend_al(mdev); /* IO is still suspended here... */
  1345. spin_lock_irq(&mdev->tconn->req_lock);
  1346. os = drbd_read_state(mdev);
  1347. ns = os;
  1348. /* If MDF_CONSISTENT is not set go into inconsistent state,
  1349. otherwise investigate MDF_WasUpToDate...
  1350. If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
  1351. otherwise into D_CONSISTENT state.
  1352. */
  1353. if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
  1354. if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
  1355. ns.disk = D_CONSISTENT;
  1356. else
  1357. ns.disk = D_OUTDATED;
  1358. } else {
  1359. ns.disk = D_INCONSISTENT;
  1360. }
  1361. if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
  1362. ns.pdsk = D_OUTDATED;
  1363. rcu_read_lock();
  1364. if (ns.disk == D_CONSISTENT &&
  1365. (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
  1366. ns.disk = D_UP_TO_DATE;
  1367. rcu_read_unlock();
  1368. /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
  1369. MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
  1370. this point, because drbd_request_state() modifies these
  1371. flags. */
  1372. /* In case we are C_CONNECTED postpone any decision on the new disk
  1373. state after the negotiation phase. */
  1374. if (mdev->state.conn == C_CONNECTED) {
  1375. mdev->new_state_tmp.i = ns.i;
  1376. ns.i = os.i;
  1377. ns.disk = D_NEGOTIATING;
  1378. /* We expect to receive up-to-date UUIDs soon.
  1379. To avoid a race in receive_state, free p_uuid while
  1380. holding req_lock. I.e. atomic with the state change */
  1381. kfree(mdev->p_uuid);
  1382. mdev->p_uuid = NULL;
  1383. }
  1384. rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
  1385. spin_unlock_irq(&mdev->tconn->req_lock);
  1386. if (rv < SS_SUCCESS)
  1387. goto force_diskless_dec;
  1388. mod_timer(&mdev->request_timer, jiffies + HZ);
  1389. if (mdev->state.role == R_PRIMARY)
  1390. mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
  1391. else
  1392. mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
  1393. drbd_md_mark_dirty(mdev);
  1394. drbd_md_sync(mdev);
  1395. kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
  1396. put_ldev(mdev);
  1397. conn_reconfig_done(mdev->tconn);
  1398. drbd_adm_finish(info, retcode);
  1399. return 0;
  1400. force_diskless_dec:
  1401. put_ldev(mdev);
  1402. force_diskless:
  1403. drbd_force_state(mdev, NS(disk, D_DISKLESS));
  1404. drbd_md_sync(mdev);
  1405. fail:
  1406. conn_reconfig_done(mdev->tconn);
  1407. if (nbc) {
  1408. if (nbc->backing_bdev)
  1409. blkdev_put(nbc->backing_bdev,
  1410. FMODE_READ | FMODE_WRITE | FMODE_EXCL);
  1411. if (nbc->md_bdev)
  1412. blkdev_put(nbc->md_bdev,
  1413. FMODE_READ | FMODE_WRITE | FMODE_EXCL);
  1414. kfree(nbc);
  1415. }
  1416. kfree(new_disk_conf);
  1417. lc_destroy(resync_lru);
  1418. kfree(new_plan);
  1419. finish:
  1420. drbd_adm_finish(info, retcode);
  1421. return 0;
  1422. }
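
/* Detach a single device from its backing store.  With force set we jump
 * straight to D_FAILED; otherwise IO is suspended first so no-one is stuck
 * in drbd_al_begin_io, and we wait for the disk to leave D_FAILED before
 * returning. */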
static int adm_detach(struct drbd_conf *mdev, int force)
{
        enum drbd_state_rv retcode;
        int ret;

        if (force) {
                drbd_force_state(mdev, NS(disk, D_FAILED));
                retcode = SS_SUCCESS;
                goto out;
        }

        drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
        retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
        /* D_FAILED will transition to DISKLESS. */
        ret = wait_event_interruptible(mdev->misc_wait,
                        mdev->state.disk != D_FAILED);
        drbd_resume_io(mdev);
        if ((int)retcode == (int)SS_IS_DISKLESS)
                retcode = SS_NOTHING_TO_DO;
        if (ret)
                retcode = ERR_INTR;
out:
        return retcode;
}

/* Detaching the disk is a process in multiple stages.  First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;
        struct detach_parms parms = { };
        int err;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
                err = detach_parms_from_attrs(&parms, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));
                        goto out;
                }
        }

        retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
out:
        drbd_adm_finish(info, retcode);
        return 0;
}

static bool conn_resync_running(struct drbd_tconn *tconn)
{
        struct drbd_conf *mdev;
        bool rv = false;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (mdev->state.conn == C_SYNC_SOURCE ||
                    mdev->state.conn == C_SYNC_TARGET ||
                    mdev->state.conn == C_PAUSED_SYNC_S ||
                    mdev->state.conn == C_PAUSED_SYNC_T) {
                        rv = true;
                        break;
                }
        }
        rcu_read_unlock();

        return rv;
}

static bool conn_ov_running(struct drbd_tconn *tconn)
{
        struct drbd_conf *mdev;
        bool rv = false;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (mdev->state.conn == C_VERIFY_S ||
                    mdev->state.conn == C_VERIFY_T) {
                        rv = true;
                        break;
                }
        }
        rcu_read_unlock();

        return rv;
}
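
/* Validate a new net_conf against the current connection state and, if
 * given, the old configuration.  Options that affect the wire format
 * (protocol, two-primaries, integrity-alg) may only change against a
 * connected peer when both sides speak protocol version 100 or newer. */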
static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
{
        struct drbd_conf *mdev;
        int i;

        if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
                if (new_conf->wire_protocol != old_conf->wire_protocol)
                        return ERR_NEED_APV_100;

                if (new_conf->two_primaries != old_conf->two_primaries)
                        return ERR_NEED_APV_100;

                if (!new_conf->integrity_alg != !old_conf->integrity_alg)
                        return ERR_NEED_APV_100;

                if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
                        return ERR_NEED_APV_100;
        }

        if (!new_conf->two_primaries &&
            conn_highest_role(tconn) == R_PRIMARY &&
            conn_highest_peer(tconn) == R_PRIMARY)
                return ERR_NEED_ALLOW_TWO_PRI;

        if (new_conf->two_primaries &&
            (new_conf->wire_protocol != DRBD_PROT_C))
                return ERR_NOT_PROTO_C;

        idr_for_each_entry(&tconn->volumes, mdev, i) {
                if (get_ldev(mdev)) {
                        enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
                        put_ldev(mdev);
                        if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
                                return ERR_STONITH_AND_PROT_A;
                }
                if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
                        return ERR_DISCARD;
        }

        if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
                return ERR_CONG_NOT_PROTO_A;

        return NO_ERROR;
}

static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
        static enum drbd_ret_code rv;
        struct drbd_conf *mdev;
        int i;

        rcu_read_lock();
        rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
        rcu_read_unlock();

        /* tconn->volumes protected by genl_lock() here */
        idr_for_each_entry(&tconn->volumes, mdev, i) {
                if (!mdev->bitmap) {
                        if (drbd_bm_init(mdev))
                                return ERR_NOMEM;
                }
        }

        return rv;
}
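
/* Bundle of crypto transforms and digest buffers a connection may need.
 * Everything is allocated up front by alloc_crypto() so that configuration
 * can fail early with a specific error code, before anything is published
 * to the connection. */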
struct crypto {
        struct crypto_hash *verify_tfm;
        struct crypto_hash *csums_tfm;
        struct crypto_hash *cram_hmac_tfm;
        struct crypto_hash *integrity_tfm;
        void *int_dig_in;
        void *int_dig_vv;
};

static int
alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
{
        if (!tfm_name[0])
                return NO_ERROR;

        *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(*tfm)) {
                *tfm = NULL;
                return err_alg;
        }

        return NO_ERROR;
}

static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
{
        char hmac_name[CRYPTO_MAX_ALG_NAME];
        enum drbd_ret_code rv;
        int hash_size;

        rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
                        ERR_CSUMS_ALG);
        if (rv != NO_ERROR)
                return rv;
        rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
                        ERR_VERIFY_ALG);
        if (rv != NO_ERROR)
                return rv;
        rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
                        ERR_INTEGRITY_ALG);
        if (rv != NO_ERROR)
                return rv;
        if (new_conf->cram_hmac_alg[0] != 0) {
                snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
                         new_conf->cram_hmac_alg);

                rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
                                ERR_AUTH_ALG);
        }
        if (crypto->integrity_tfm) {
                hash_size = crypto_hash_digestsize(crypto->integrity_tfm);
                crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
                if (!crypto->int_dig_in)
                        return ERR_NOMEM;
                crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
                if (!crypto->int_dig_vv)
                        return ERR_NOMEM;
        }

        return rv;
}

static void free_crypto(struct crypto *crypto)
{
        kfree(crypto->int_dig_in);
        kfree(crypto->int_dig_vv);
        crypto_free_hash(crypto->cram_hmac_tfm);
        crypto_free_hash(crypto->integrity_tfm);
        crypto_free_hash(crypto->csums_tfm);
        crypto_free_hash(crypto->verify_tfm);
}
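
/* Change options of an established network configuration.  The new
 * net_conf is published with the usual RCU replace-then-free pattern,
 * roughly:
 *
 *      mutex_lock(&tconn->conf_update);
 *      old_conf = tconn->net_conf;
 *      *new_conf = *old_conf;                          -- start from current values
 *      rcu_assign_pointer(tconn->net_conf, new_conf);
 *      mutex_unlock(&tconn->conf_update);
 *      synchronize_rcu();                              -- wait out all readers
 *      kfree(old_conf);
 *
 * Hash transforms currently in use by a running resync or online verify
 * are deliberately left untouched. */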
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;
        struct drbd_tconn *tconn;
        struct net_conf *old_conf, *new_conf = NULL;
        int err;
        int ovr; /* online verify running */
        int rsr; /* re-sync running */
        struct crypto crypto = { };

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        tconn = adm_ctx.tconn;

        new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
        if (!new_conf) {
                retcode = ERR_NOMEM;
                goto out;
        }

        conn_reconfig_start(tconn);

        mutex_lock(&tconn->data.mutex);
        mutex_lock(&tconn->conf_update);
        old_conf = tconn->net_conf;

        if (!old_conf) {
                drbd_msg_put_info("net conf missing, try connect");
                retcode = ERR_INVALID_REQUEST;
                goto fail;
        }

        *new_conf = *old_conf;
        if (should_set_defaults(info))
                set_net_conf_defaults(new_conf);

        err = net_conf_from_attrs_for_change(new_conf, info);
        if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
        }

        retcode = check_net_options(tconn, new_conf);
        if (retcode != NO_ERROR)
                goto fail;

        /* re-sync running */
        rsr = conn_resync_running(tconn);
        if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
                retcode = ERR_CSUMS_RESYNC_RUNNING;
                goto fail;
        }

        /* online verify running */
        ovr = conn_ov_running(tconn);
        if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
                retcode = ERR_VERIFY_RUNNING;
                goto fail;
        }

        retcode = alloc_crypto(&crypto, new_conf);
        if (retcode != NO_ERROR)
                goto fail;

        rcu_assign_pointer(tconn->net_conf, new_conf);

        if (!rsr) {
                crypto_free_hash(tconn->csums_tfm);
                tconn->csums_tfm = crypto.csums_tfm;
                crypto.csums_tfm = NULL;
        }
        if (!ovr) {
                crypto_free_hash(tconn->verify_tfm);
                tconn->verify_tfm = crypto.verify_tfm;
                crypto.verify_tfm = NULL;
        }

        kfree(tconn->int_dig_in);
        tconn->int_dig_in = crypto.int_dig_in;
        kfree(tconn->int_dig_vv);
        tconn->int_dig_vv = crypto.int_dig_vv;
        crypto_free_hash(tconn->integrity_tfm);
        tconn->integrity_tfm = crypto.integrity_tfm;
        if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
                /* Do this without trying to take tconn->data.mutex again. */
                __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);

        crypto_free_hash(tconn->cram_hmac_tfm);
        tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;

        mutex_unlock(&tconn->conf_update);
        mutex_unlock(&tconn->data.mutex);
        synchronize_rcu();
        kfree(old_conf);

        if (tconn->cstate >= C_WF_REPORT_PARAMS)
                drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

        goto done;

 fail:
        mutex_unlock(&tconn->conf_update);
        mutex_unlock(&tconn->data.mutex);
        free_crypto(&crypto);
        kfree(new_conf);
 done:
        conn_reconfig_done(tconn);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
}
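
/* Establish a new network configuration ("connect"): reject endpoint
 * pairs already used by another connection, validate and allocate
 * everything that can fail, publish the net_conf and crypto transforms,
 * then request C_UNCONNECTED so the receiver starts reaching for the
 * peer. */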
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
        struct drbd_conf *mdev;
        struct net_conf *old_conf, *new_conf = NULL;
        struct crypto crypto = { };
        struct drbd_tconn *tconn;
        enum drbd_ret_code retcode;
        int i;
        int err;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;
        if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
                drbd_msg_put_info("connection endpoint(s) missing");
                retcode = ERR_INVALID_REQUEST;
                goto out;
        }

        /* No need for _rcu here. All reconfiguration is
         * strictly serialized on genl_lock(). We are protected against
         * concurrent reconfiguration/addition/deletion */
        list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
                if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
                    !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
                        retcode = ERR_LOCAL_ADDR;
                        goto out;
                }

                if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
                    !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
                        retcode = ERR_PEER_ADDR;
                        goto out;
                }
        }

        tconn = adm_ctx.tconn;
        conn_reconfig_start(tconn);

        if (tconn->cstate > C_STANDALONE) {
                retcode = ERR_NET_CONFIGURED;
                goto fail;
        }

        /* allocation not in the IO path, cqueue thread context */
        new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
        if (!new_conf) {
                retcode = ERR_NOMEM;
                goto fail;
        }

        set_net_conf_defaults(new_conf);

        err = net_conf_from_attrs(new_conf, info);
        if (err) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
        }

        retcode = check_net_options(tconn, new_conf);
        if (retcode != NO_ERROR)
                goto fail;

        retcode = alloc_crypto(&crypto, new_conf);
        if (retcode != NO_ERROR)
                goto fail;

        ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

        conn_flush_workqueue(tconn);

        mutex_lock(&tconn->conf_update);
        old_conf = tconn->net_conf;
        if (old_conf) {
                retcode = ERR_NET_CONFIGURED;
                mutex_unlock(&tconn->conf_update);
                goto fail;
        }
        rcu_assign_pointer(tconn->net_conf, new_conf);

        conn_free_crypto(tconn);
        tconn->int_dig_in = crypto.int_dig_in;
        tconn->int_dig_vv = crypto.int_dig_vv;
        tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
        tconn->integrity_tfm = crypto.integrity_tfm;
        tconn->csums_tfm = crypto.csums_tfm;
        tconn->verify_tfm = crypto.verify_tfm;

        tconn->my_addr_len = nla_len(adm_ctx.my_addr);
        memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
        tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
        memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
        mutex_unlock(&tconn->conf_update);

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, i) {
                mdev->send_cnt = 0;
                mdev->recv_cnt = 0;
        }
        rcu_read_unlock();

        retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

        conn_reconfig_done(tconn);
        drbd_adm_finish(info, retcode);
        return 0;

fail:
        free_crypto(&crypto);
        kfree(new_conf);

        conn_reconfig_done(tconn);
out:
        drbd_adm_finish(info, retcode);
        return 0;
}

static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
        enum drbd_state_rv rv;

        rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
                        force ? CS_HARD : 0);

        switch (rv) {
        case SS_NOTHING_TO_DO:
                break;
        case SS_ALREADY_STANDALONE:
                return SS_SUCCESS;
        case SS_PRIMARY_NOP:
                /* Our state checking code wants to see the peer outdated. */
                rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
                                                pdsk, D_OUTDATED), CS_VERBOSE);
                break;
        case SS_CW_FAILED_BY_PEER:
                /* The peer probably wants to see us outdated. */
                rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
                                                disk, D_OUTDATED), 0);
                if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
                        rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
                                        CS_HARD);
                }
                break;
        default:;
                /* no special handling necessary */
        }

        if (rv >= SS_SUCCESS) {
                enum drbd_state_rv rv2;
                /* No one else can reconfigure the network while I am here.
                 * The state handling only uses drbd_thread_stop_nowait(),
                 * we want to really wait here until the receiver is no more.
                 */
                drbd_thread_stop(&adm_ctx.tconn->receiver);

                /* Race breaker.  This additional state change request may be
                 * necessary, if this was a forced disconnect during a receiver
                 * restart.  We may have "killed" the receiver thread just
                 * after drbdd_init() returned.  Typically, we should be
                 * C_STANDALONE already, now, and this becomes a no-op.
                 */
                rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
                                CS_VERBOSE | CS_HARD);
                if (rv2 < SS_SUCCESS)
                        conn_err(tconn,
                                "unexpected rv2=%d in conn_try_disconnect()\n",
                                rv2);
        }
        return rv;
}

int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
        struct disconnect_parms parms;
        struct drbd_tconn *tconn;
        enum drbd_state_rv rv;
        enum drbd_ret_code retcode;
        int err;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto fail;

        tconn = adm_ctx.tconn;
        memset(&parms, 0, sizeof(parms));
        if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
                err = disconnect_parms_from_attrs(&parms, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));
                        goto fail;
                }
        }

        rv = conn_try_disconnect(tconn, parms.force_disconnect);
        if (rv < SS_SUCCESS)
                retcode = rv;  /* FIXME: Type mismatch. */
        else
                retcode = NO_ERROR;
 fail:
        drbd_adm_finish(info, retcode);
        return 0;
}
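
/* After an online grow, one side must become sync source for the new
 * area.  If the roles differ, the Primary wins; with equal roles the tie
 * is broken by the DISCARD_CONCURRENT flag, which the connection
 * handshake leaves set on only one of the two nodes. */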
void resync_after_online_grow(struct drbd_conf *mdev)
{
        int iass; /* I am sync source */

        dev_info(DEV, "Resync of new storage after online grow\n");
        if (mdev->state.role != mdev->state.peer)
                iass = (mdev->state.role == R_PRIMARY);
        else
                iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);

        if (iass)
                drbd_start_resync(mdev, C_SYNC_SOURCE);
        else
                _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
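
/* Resize the device ("resize").  Refused while a resync is running and
 * when both nodes are Secondary; an explicitly requested size is stored
 * in disk_conf via the RCU replace-then-free scheme before the new size
 * is actually determined. */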
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
        struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
        struct resize_parms rs;
        struct drbd_conf *mdev;
        enum drbd_ret_code retcode;
        enum determine_dev_size dd;
        enum dds_flags ddsf;
        sector_t u_size;
        int err;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto fail;

        memset(&rs, 0, sizeof(struct resize_parms));
        if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
                err = resize_parms_from_attrs(&rs, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));
                        goto fail;
                }
        }

        mdev = adm_ctx.mdev;
        if (mdev->state.conn > C_CONNECTED) {
                retcode = ERR_RESIZE_RESYNC;
                goto fail;
        }

        if (mdev->state.role == R_SECONDARY &&
            mdev->state.peer == R_SECONDARY) {
                retcode = ERR_NO_PRIMARY;
                goto fail;
        }

        if (!get_ldev(mdev)) {
                retcode = ERR_NO_DISK;
                goto fail;
        }

        if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
                retcode = ERR_NEED_APV_93;
                goto fail_ldev;
        }

        rcu_read_lock();
        u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
        rcu_read_unlock();
        if (u_size != (sector_t)rs.resize_size) {
                new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
                if (!new_disk_conf) {
                        retcode = ERR_NOMEM;
                        goto fail_ldev;
                }
        }

        if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
                mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

        if (new_disk_conf) {
                mutex_lock(&mdev->tconn->conf_update);
                old_disk_conf = mdev->ldev->disk_conf;
                *new_disk_conf = *old_disk_conf;
                new_disk_conf->disk_size = (sector_t)rs.resize_size;
                rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
                mutex_unlock(&mdev->tconn->conf_update);
                synchronize_rcu();
                kfree(old_disk_conf);
        }

        ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
        dd = drbd_determine_dev_size(mdev, ddsf);
        drbd_md_sync(mdev);
        put_ldev(mdev);
        if (dd == dev_size_error) {
                retcode = ERR_NOMEM_BITMAP;
                goto fail;
        }

        if (mdev->state.conn == C_CONNECTED) {
                if (dd == grew)
                        set_bit(RESIZE_PENDING, &mdev->flags);

                drbd_send_uuids(mdev);
                drbd_send_sizes(mdev, 1, ddsf);
        }

 fail:
        drbd_adm_finish(info, retcode);
        return 0;

 fail_ldev:
        /* error paths entered after a successful get_ldev()
         * must return the reference, or we leak it */
        put_ldev(mdev);
        goto fail;
}

int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;
        struct drbd_tconn *tconn;
        struct res_opts res_opts;
        int err;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto fail;
        tconn = adm_ctx.tconn;

        res_opts = tconn->res_opts;
        if (should_set_defaults(info))
                set_res_opts_defaults(&res_opts);

        err = res_opts_from_attrs(&res_opts, info);
        if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
        }

        err = set_resource_options(tconn, &res_opts);
        if (err) {
                retcode = ERR_INVALID_REQUEST;
                if (err == -ENOMEM)
                        retcode = ERR_NOMEM;
        }

fail:
        drbd_adm_finish(info, retcode);
        return 0;
}
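
/* Force a full resync as sync target ("invalidate").  If the ordered
 * state request keeps answering SS_NEED_CONNECTION, fall back to marking
 * the disk Inconsistent directly while disconnected, and retry until one
 * of the two paths sticks. */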
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
        struct drbd_conf *mdev;
        int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        mdev = adm_ctx.mdev;

        /* If there is still bitmap IO pending, probably because of a previous
         * resync just being finished, wait for it before requesting a new resync. */
        wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

        retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

        if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
                retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

        while (retcode == SS_NEED_CONNECTION) {
                spin_lock_irq(&mdev->tconn->req_lock);
                if (mdev->state.conn < C_CONNECTED)
                        retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
                spin_unlock_irq(&mdev->tconn->req_lock);

                if (retcode != SS_NEED_CONNECTION)
                        break;

                retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
        }

out:
        drbd_adm_finish(info, retcode);
        return 0;
}

static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
        int rv;

        rv = drbd_bmio_set_n_write(mdev);
        drbd_suspend_al(mdev);
        return rv;
}

static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
                union drbd_state mask, union drbd_state val)
{
        enum drbd_ret_code retcode;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
        drbd_adm_finish(info, retcode);
        return 0;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
        return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
}

int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
                retcode = ERR_PAUSE_IS_SET;
out:
        drbd_adm_finish(info, retcode);
        return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
        union drbd_dev_state s;
        enum drbd_ret_code retcode;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
                s = adm_ctx.mdev->state;
                if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
                        retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
                                  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
                } else {
                        retcode = ERR_PAUSE_IS_CLEAR;
                }
        }

out:
        drbd_adm_finish(info, retcode);
        return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
        return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
        struct drbd_conf *mdev;
        int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        mdev = adm_ctx.mdev;
        if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
                drbd_uuid_new_current(mdev);
                clear_bit(NEW_CUR_UUID, &mdev->flags);
        }
        drbd_suspend_io(mdev);
        retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
        if (retcode == SS_SUCCESS) {
                if (mdev->state.conn < C_CONNECTED)
                        tl_clear(mdev->tconn);
                if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
                        tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
        }
        drbd_resume_io(mdev);

out:
        drbd_adm_finish(info, retcode);
        return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
        return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
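
/* Emit the DRBD_NLA_CFG_CONTEXT nest (volume number, resource name and,
 * where configured, both endpoint addresses) into an outgoing skb.  On
 * failure the half-written nest is cancelled, leaving the skb
 * consistent. */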
int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
{
        struct nlattr *nla;
        nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
        if (!nla)
                goto nla_put_failure;
        if (vnr != VOLUME_UNSPECIFIED)
                NLA_PUT_U32(skb, T_ctx_volume, vnr);
        NLA_PUT_STRING(skb, T_ctx_resource_name, tconn->name);
        if (tconn->my_addr_len)
                NLA_PUT(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr);
        if (tconn->peer_addr_len)
                NLA_PUT(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr);
        nla_nest_end(skb, nla);
        return 0;

nla_put_failure:
        if (nla)
                nla_nest_cancel(skb, nla);
        return -EMSGSIZE;
}

int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
                const struct sib_info *sib)
{
        struct state_info *si = NULL; /* for sizeof(si->member); */
        struct net_conf *nc;
        struct nlattr *nla;
        int got_ldev;
        int err = 0;
        int exclude_sensitive;

        /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
         * to.  So we better exclude_sensitive information.
         *
         * If sib == NULL, this is drbd_adm_get_status, executed synchronously
         * in the context of the requesting user process. Exclude sensitive
         * information, unless current has superuser.
         *
         * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
         * relies on the current implementation of netlink_dump(), which
         * executes the dump callback successively from netlink_recvmsg(),
         * always in the context of the receiving process */
        exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

        got_ldev = get_ldev(mdev);

        /* We need to add connection name and volume number information still.
         * Minor number is in drbd_genlmsghdr. */
        if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
                goto nla_put_failure;

        if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
                goto nla_put_failure;

        rcu_read_lock();
        if (got_ldev)
                if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
                        goto nla_put_failure;

        nc = rcu_dereference(mdev->tconn->net_conf);
        if (nc)
                err = net_conf_to_skb(skb, nc, exclude_sensitive);
        rcu_read_unlock();
        if (err)
                goto nla_put_failure;

        nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
        if (!nla)
                goto nla_put_failure;
        NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
        NLA_PUT_U32(skb, T_current_state, mdev->state.i);
        NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
        NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));

        if (got_ldev) {
                NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
                NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
                NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
                NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
                if (C_SYNC_SOURCE <= mdev->state.conn &&
                    C_PAUSED_SYNC_T >= mdev->state.conn) {
                        NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
                        NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
                }
        }

        if (sib) {
                switch (sib->sib_reason) {
                case SIB_SYNC_PROGRESS:
                case SIB_GET_STATUS_REPLY:
                        break;
                case SIB_STATE_CHANGE:
                        NLA_PUT_U32(skb, T_prev_state, sib->os.i);
                        NLA_PUT_U32(skb, T_new_state, sib->ns.i);
                        break;
                case SIB_HELPER_POST:
                        NLA_PUT_U32(skb,
                                T_helper_exit_code, sib->helper_exit_code);
                        /* fall through */
                case SIB_HELPER_PRE:
                        NLA_PUT_STRING(skb, T_helper, sib->helper_name);
                        break;
                }
        }
        nla_nest_end(skb, nla);

        if (0)
nla_put_failure:
                err = -EMSGSIZE;
        if (got_ldev)
                put_ldev(mdev);
        return err;
}

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;
        int err;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
        if (err) {
                nlmsg_free(adm_ctx.reply_skb);
                return err;
        }
out:
        drbd_adm_finish(info, retcode);
        return 0;
}

int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct drbd_conf *mdev;
        struct drbd_genlmsghdr *dh;
        struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
        struct drbd_tconn *tconn = NULL;
        struct drbd_tconn *tmp;
        unsigned volume = cb->args[1];

        /* Open coded, deferred, iteration:
         * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
         *      idr_for_each_entry(&tconn->volumes, mdev, i) {
         *        ...
         *      }
         * }
         * where tconn is cb->args[0];
         * and i is cb->args[1];
         *
         * cb->args[2] indicates if we shall loop over all resources,
         * or just dump all volumes of a single resource.
         *
         * This may miss entries inserted after this dump started,
         * or entries deleted before they are reached.
         *
         * We need to make sure the mdev won't disappear while
         * we are looking at it, and revalidate our iterators
         * on each iteration.
         */

        /* synchronize with conn_create()/conn_destroy() */
        rcu_read_lock();
        /* revalidate iterator position */
        list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
                if (pos == NULL) {
                        /* first iteration */
                        pos = tmp;
                        tconn = pos;
                        break;
                }
                if (tmp == pos) {
                        tconn = pos;
                        break;
                }
        }
        if (tconn) {
next_tconn:
                mdev = idr_get_next(&tconn->volumes, &volume);
                if (!mdev) {
                        /* No more volumes to dump on this tconn.
                         * Advance tconn iterator. */
                        pos = list_entry_rcu(tconn->all_tconn.next,
                                             struct drbd_tconn, all_tconn);
                        /* Did we dump any volume on this tconn yet? */
                        if (volume != 0) {
                                /* If we reached the end of the list,
                                 * or only a single resource dump was requested,
                                 * we are done. */
                                if (&pos->all_tconn == &drbd_tconns || cb->args[2])
                                        goto out;
                                volume = 0;
                                tconn = pos;
                                goto next_tconn;
                        }
                }

                dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
                                cb->nlh->nlmsg_seq, &drbd_genl_family,
                                NLM_F_MULTI, DRBD_ADM_GET_STATUS);
                if (!dh)
                        goto out;

                if (!mdev) {
                        /* this is a tconn without a single volume */
                        dh->minor = -1U;
                        dh->ret_code = NO_ERROR;
                        if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
                                genlmsg_cancel(skb, dh);
                        else
                                genlmsg_end(skb, dh);
                        goto out;
                }

                D_ASSERT(mdev->vnr == volume);
                D_ASSERT(mdev->tconn == tconn);

                dh->minor = mdev_to_minor(mdev);
                dh->ret_code = NO_ERROR;

                if (nla_put_status_info(skb, mdev, NULL)) {
                        genlmsg_cancel(skb, dh);
                        goto out;
                }
                genlmsg_end(skb, dh);
        }

out:
        rcu_read_unlock();
        /* where to start the next iteration */
        cb->args[0] = (long)pos;
        cb->args[1] = (pos == tconn) ? volume + 1 : 0;

        /* No more tconns/volumes/minors found results in an empty skb.
         * Which will terminate the dump. */
        return skb->len;
}

/*
 * Request status of all resources, or of all volumes within a single
 * resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock().  During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
        const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
        struct nlattr *nla;
        const char *resource_name;
        struct drbd_tconn *tconn;
        int maxtype;

        /* Is this a followup call? */
        if (cb->args[0]) {
                /* ... of a single resource dump,
                 * and the resource iterator has been advanced already? */
                if (cb->args[2] && cb->args[2] != cb->args[0])
                        return 0; /* DONE. */
                goto dump;
        }

        /* First call (from netlink_dump_start).  We need to figure out
         * which resource(s) the user wants us to dump. */
        nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
                        nlmsg_attrlen(cb->nlh, hdrlen),
                        DRBD_NLA_CFG_CONTEXT);

        /* No explicit context given.  Dump all. */
        if (!nla)
                goto dump;
        maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
        nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
        if (IS_ERR(nla))
                return PTR_ERR(nla);
        /* context given, but no name present? */
        if (!nla)
                return -EINVAL;
        resource_name = nla_data(nla);
        tconn = conn_get_by_name(resource_name);
        if (!tconn)
                return -ENODEV;

        kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

        /* prime iterators, and set "filter" mode mark:
         * only dump this tconn. */
        cb->args[0] = (long)tconn;
        /* cb->args[1] = 0; passed in this way. */
        cb->args[2] = (long)tconn;

dump:
        return get_one_status(skb, cb);
}

int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;
        struct timeout_parms tp;
        int err;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        tp.timeout_type =
                adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
                test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
                UT_DEFAULT;

        err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
        if (err) {
                nlmsg_free(adm_ctx.reply_skb);
                return err;
        }
out:
        drbd_adm_finish(info, retcode);
        return 0;
}
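
/* Start online verify ("verify").  An explicit start sector may be passed
 * in to resume at a given position; it is rounded down to bitmap bit
 * alignment, since w_make_ov_request works in whole bitmap bits. */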
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
        struct drbd_conf *mdev;
        enum drbd_ret_code retcode;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        mdev = adm_ctx.mdev;
        if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
                /* resume from last known position, if possible */
                struct start_ov_parms parms =
                        { .ov_start_sector = mdev->ov_start_sector };
                int err = start_ov_parms_from_attrs(&parms, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));
                        goto out;
                }
                /* w_make_ov_request expects position to be aligned:
                 * round down to a multiple of BM_SECT_PER_BIT
                 * (masking with ~BM_SECT_PER_BIT would only clear one bit) */
                mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
        }

        /* If there is still bitmap IO pending, e.g. previous resync or verify
         * just being finished, wait for it before requesting a new resync. */
        wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
        retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
out:
        drbd_adm_finish(info, retcode);
        return 0;
}
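
/* Generate a new current UUID ("new-current-uuid").  With clear_bm set on
 * a just-created, connected device this implements "skip initial sync":
 * the bitmap is cleared and both sides go UpToDate without a resync. */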
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
        struct drbd_conf *mdev;
        enum drbd_ret_code retcode;
        int skip_initial_sync = 0;
        int err;
        struct new_c_uuid_parms args;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out_nolock;

        mdev = adm_ctx.mdev;
        memset(&args, 0, sizeof(args));
        if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
                err = new_c_uuid_parms_from_attrs(&args, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));
                        goto out_nolock;
                }
        }

        mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

        if (!get_ldev(mdev)) {
                retcode = ERR_NO_DISK;
                goto out;
        }

        /* this is "skip initial sync", assume to be clean */
        if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
            mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
                dev_info(DEV, "Preparing to skip initial sync\n");
                skip_initial_sync = 1;
        } else if (mdev->state.conn != C_STANDALONE) {
                retcode = ERR_CONNECTED;
                goto out_dec;
        }

        drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
        drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

        if (args.clear_bm) {
                err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
                        "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
                if (err) {
                        dev_err(DEV, "Writing bitmap failed with %d\n", err);
                        retcode = ERR_IO_MD_DISK;
                }
                if (skip_initial_sync) {
                        drbd_send_uuids_skip_initial_sync(mdev);
                        _drbd_uuid_set(mdev, UI_BITMAP, 0);
                        drbd_print_uuids(mdev, "cleared bitmap UUID");
                        spin_lock_irq(&mdev->tconn->req_lock);
                        _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
                                        CS_VERBOSE, NULL);
                        spin_unlock_irq(&mdev->tconn->req_lock);
                }
        }

        drbd_md_sync(mdev);
out_dec:
        put_ldev(mdev);
out:
        mutex_unlock(mdev->state_mutex);
out_nolock:
        drbd_adm_finish(info, retcode);
        return 0;
}

static enum drbd_ret_code
drbd_check_resource_name(const char *name)
{
        if (!name || !name[0]) {
                drbd_msg_put_info("resource name missing");
                return ERR_MANDATORY_TAG;
        }
        /* if we want to use these in sysfs/configfs/debugfs some day,
         * we must not allow slashes */
        if (strchr(name, '/')) {
                drbd_msg_put_info("invalid resource name");
                return ERR_INVALID_REQUEST;
        }
        return NO_ERROR;
}

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;
        struct res_opts res_opts;
        int err;

        retcode = drbd_adm_prepare(skb, info, 0);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        set_res_opts_defaults(&res_opts);
        err = res_opts_from_attrs(&res_opts, info);
        if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto out;
        }

        retcode = drbd_check_resource_name(adm_ctx.resource_name);
        if (retcode != NO_ERROR)
                goto out;

        if (adm_ctx.tconn) {
                if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
                        retcode = ERR_INVALID_REQUEST;
                        drbd_msg_put_info("resource exists");
                }
                /* else: still NO_ERROR */
                goto out;
        }

        if (!conn_create(adm_ctx.resource_name, &res_opts))
                retcode = ERR_NOMEM;

out:
        drbd_adm_finish(info, retcode);
        return 0;
}

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
        struct drbd_genlmsghdr *dh = info->userhdr;
        enum drbd_ret_code retcode;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        /* FIXME drop minor_count parameter, limit to MINORMASK */
        if (dh->minor >= minor_count) {
                drbd_msg_put_info("requested minor out of range");
                retcode = ERR_INVALID_REQUEST;
                goto out;
        }
        if (adm_ctx.volume > DRBD_VOLUME_MAX) {
                drbd_msg_put_info("requested volume id out of range");
                retcode = ERR_INVALID_REQUEST;
                goto out;
        }

        /* drbd_adm_prepare made sure already
         * that mdev->tconn and mdev->vnr match the request. */
        if (adm_ctx.mdev) {
                if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
                        retcode = ERR_MINOR_EXISTS;
                /* else: still NO_ERROR */
                goto out;
        }

        retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
        drbd_adm_finish(info, retcode);
        return 0;
}

static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
        if (mdev->state.disk == D_DISKLESS &&
            /* no need to be mdev->state.conn == C_STANDALONE &&
             * we may want to delete a minor from a live replication group.
             */
            mdev->state.role == R_SECONDARY) {
                idr_remove(&mdev->tconn->volumes, mdev->vnr);
                idr_remove(&minors, mdev_to_minor(mdev));
                del_gendisk(mdev->vdisk);
                synchronize_rcu();
                kref_put(&mdev->kref, &drbd_minor_destroy);
                return NO_ERROR;
        } else
                return ERR_MINOR_CONFIGURED;
}

int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        retcode = adm_delete_minor(adm_ctx.mdev);
out:
        drbd_adm_finish(info, retcode);
        return 0;
}
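
/* Tear down a whole resource ("down"): demote all volumes to Secondary,
 * disconnect, detach every disk, then delete the now unconfigured minors
 * and finally the connection itself.  Each stage must succeed before the
 * next is attempted. */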
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
        int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
        struct drbd_conf *mdev;
        unsigned i;

        retcode = drbd_adm_prepare(skb, info, 0);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        if (!adm_ctx.tconn) {
                retcode = ERR_RES_NOT_KNOWN;
                goto out;
        }

        /* demote */
        idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
                retcode = drbd_set_role(mdev, R_SECONDARY, 0);
                if (retcode < SS_SUCCESS) {
                        drbd_msg_put_info("failed to demote");
                        goto out;
                }
        }

        retcode = conn_try_disconnect(adm_ctx.tconn, 0);
        if (retcode < SS_SUCCESS) {
                drbd_msg_put_info("failed to disconnect");
                goto out;
        }

        /* detach */
        idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
                retcode = adm_detach(mdev, 0);
                if (retcode < SS_SUCCESS) {
                        drbd_msg_put_info("failed to detach");
                        goto out;
                }
        }

        /* If we reach this, all volumes (of this tconn) are Secondary,
         * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
         * actually stopped, state handling only does drbd_thread_stop_nowait(). */
        drbd_thread_stop(&adm_ctx.tconn->worker);

        /* Now, nothing can fail anymore */

        /* delete volumes */
        idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
                retcode = adm_delete_minor(mdev);
                if (retcode != NO_ERROR) {
                        /* "can not happen" */
                        drbd_msg_put_info("failed to delete volume");
                        goto out;
                }
        }

        /* delete connection */
        if (conn_lowest_minor(adm_ctx.tconn) < 0) {
                list_del_rcu(&adm_ctx.tconn->all_tconn);
                synchronize_rcu();
                kref_put(&adm_ctx.tconn->kref, &conn_destroy);

                retcode = NO_ERROR;
        } else {
                /* "can not happen" */
                retcode = ERR_RES_IN_USE;
                drbd_msg_put_info("failed to delete connection");
        }
out:
        drbd_adm_finish(info, retcode);
        return 0;
}

int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        if (conn_lowest_minor(adm_ctx.tconn) < 0) {
                list_del_rcu(&adm_ctx.tconn->all_tconn);
                synchronize_rcu();
                kref_put(&adm_ctx.tconn->kref, &conn_destroy);
                retcode = NO_ERROR;
        } else {
                retcode = ERR_RES_IN_USE;
        }

        if (retcode == NO_ERROR)
                drbd_thread_stop(&adm_ctx.tconn->worker);
out:
        drbd_adm_finish(info, retcode);
        return 0;
}
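
/* Broadcast a state info ("sib") event to the DRBD events multicast
 * group.  -ESRCH from the multicast send only means nobody is currently
 * listening, and is not treated as an error. */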
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
        static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
        struct sk_buff *msg;
        struct drbd_genlmsghdr *d_out;
        unsigned seq;
        int err = -ENOMEM;

        seq = atomic_inc_return(&drbd_genl_seq);
        msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
        if (!msg)
                goto failed;

        err = -EMSGSIZE;
        d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
        if (!d_out) /* cannot happen, but anyways. */
                goto nla_put_failure;
        d_out->minor = mdev_to_minor(mdev);
        d_out->ret_code = NO_ERROR;

        if (nla_put_status_info(msg, mdev, sib))
                goto nla_put_failure;
        genlmsg_end(msg, d_out);
        err = drbd_genl_multicast_events(msg, 0);
        /* msg has been consumed or freed in netlink_broadcast() */
        if (err && err != -ESRCH)
                goto failed;

        return;

nla_put_failure:
        nlmsg_free(msg);
failed:
        dev_err(DEV, "Error %d while broadcasting event. "
                "Event seq:%u sib_reason:%u\n",
                err, seq, sib->sib_reason);
}