#include "ceph_debug.h"

#include <linux/wait.h>
#include <linux/sched.h>

#include "mds_client.h"
#include "mon_client.h"
#include "super.h"
#include "messenger.h"
#include "decode.h"
#include "auth.h"

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;

/*
 * mds reply parsing
 */

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info)
{
	int err = -EIO;

	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;
	return 0;
bad:
	return err;
}

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri);
		if (err < 0)
			goto out_bad;

		if (unlikely(*p + sizeof(*info->dirfrag) > end))
			goto bad;
		info->dirfrag = *p;
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
			goto bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;
		info->dlease = *p;
		*p += sizeof(*info->dlease);
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info)
{
	u32 num, i = 0;
	int err;

	info->dir_dir = *p;
	if (*p + sizeof(*info->dir_dir) > end)
		goto bad;
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
	if (*p > end)
		goto bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	info->dir_end = ceph_decode_8(p);
	info->dir_complete = ceph_decode_8(p);
	if (num == 0)
		goto done;

	/* alloc large array */
	info->dir_nr = num;
	info->dir_in = kcalloc(num, sizeof(*info->dir_in) +
			       sizeof(*info->dir_dname) +
			       sizeof(*info->dir_dname_len) +
			       sizeof(*info->dir_dlease),
			       GFP_NOFS);
	if (info->dir_in == NULL) {
		err = -ENOMEM;
		goto out_bad;
	}
	info->dir_dname = (void *)(info->dir_in + num);
	info->dir_dname_len = (void *)(info->dir_dname + num);
	info->dir_dlease = (void *)(info->dir_dname_len + num);

	while (num) {
		/* dentry */
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		info->dir_dname_len[i] = ceph_decode_32(p);
		ceph_decode_need(p, end, info->dir_dname_len[i], bad);
		info->dir_dname[i] = *p;
		*p += info->dir_dname_len[i];
		dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
		     info->dir_dname[i]);
		info->dir_dlease[i] = *p;
		*p += sizeof(struct ceph_mds_reply_lease);

		/* inode */
		err = parse_reply_info_in(p, end, &info->dir_in[i]);
		if (err < 0)
			goto out_bad;
		i++;
		num--;
	}

done:
	if (*p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		err = parse_reply_info_trace(&p, p+len, info);
		if (err < 0)
			goto out_bad;
	}

	/* dir content */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		err = parse_reply_info_dir(&p, p+len, info);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;
	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}

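/*
 * Free memory allocated while parsing the reply (currently just the
 * readdir entry array from parse_reply_info_dir).
 */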
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	kfree(info->dir_in);
}

/*
 * sessions
 */

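/*
 * stringify a session state for debug output
 */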
static const char *session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	default: return "???";
	}
}

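/*
 * Take a reference on a session, unless its refcount has already
 * dropped to zero (in which case it is being torn down and must not
 * be resurrected).
 */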
static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
	if (atomic_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
		return s;
	} else {
		dout("mdsc get_session %p 0 -- FAIL", s);
		return NULL;
	}
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
	if (atomic_dec_and_test(&s->s_ref)) {
		if (s->s_authorizer)
			s->s_mdsc->client->monc.auth->ops->destroy_authorizer(
				s->s_mdsc->client->monc.auth, s->s_authorizer);
		kfree(s);
	}
}

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
		return NULL;
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     atomic_read(&session->s_ref));
	get_session(session);
	return session;
}

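/*
 * true if we have a registered session for the given mds.
 * called under mdsc->mutex.
 */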
static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions)
		return false;
	return mdsc->sessions[mds];
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	s->s_ttl = 0;
	s->s_seq = 0;
	mutex_init(&s->s_mutex);

	ceph_con_init(mdsc->client->msgr, &s->s_con);
	s->s_con.private = s;
	s->s_con.ops = &mds_con_ops;
	s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
	s->s_con.peer_name.num = cpu_to_le64(mds);

	spin_lock_init(&s->s_cap_lock);
	s->s_cap_gen = 0;
	s->s_cap_ttl = 0;
	s->s_renew_requested = 0;
	s->s_renew_seq = 0;
	INIT_LIST_HEAD(&s->s_caps);
	s->s_nr_caps = 0;
	atomic_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_LIST_HEAD(&s->s_cap_releases_done);
	INIT_LIST_HEAD(&s->s_cap_flushing);
	INIT_LIST_HEAD(&s->s_cap_snaps_flushing);

	dout("register_session mds%d\n", mds);
	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds+1);
		struct ceph_mds_session **sa;

		dout("register_session realloc to %d\n", newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (sa == NULL)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}
	mdsc->sessions[mds] = s;
	atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}

/*
 * called under mdsc->mutex
 */
static void unregister_session(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *s)
{
	dout("unregister_session mds%d %p\n", s->s_mds, s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}

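/*
 * Release a request's resources once the last kref is dropped: the
 * request and reply messages, any pinned inodes/dentries and their
 * CEPH_CAP_PIN references, built paths, the session ref, and the cap
 * reservation.
 */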
void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply) {
		ceph_msg_put(req->r_reply);
		destroy_reply_info(&req->r_reply_info);
	}
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode),
				  CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_locked_dir)
		ceph_put_cap_refs(ceph_inode(req->r_locked_dir),
				  CEPH_CAP_PIN);
	if (req->r_target_inode)
		iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry) {
		ceph_put_cap_refs(
			ceph_inode(req->r_old_dentry->d_parent->d_inode),
			CEPH_CAP_PIN);
		dput(req->r_old_dentry);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	put_request_session(req);
	ceph_unreserve_caps(&req->r_caps_reservation);
	kfree(req);
}

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
						 u64 tid)
{
	struct ceph_mds_request *req;

	req = radix_tree_lookup(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);
	return req;
}

/*
 * Register an in-flight request, and assign a tid.  Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps)
		ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	radix_tree_insert(&mdsc->request_tree, req->r_tid, (void *)req);

	if (dir) {
		struct ceph_inode_info *ci = ceph_inode(dir);

		spin_lock(&ci->i_unsafe_lock);
		req->r_unsafe_dir = dir;
		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
		spin_unlock(&ci->i_unsafe_lock);
	}
}

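/*
 * Remove an in-flight request from the tid tree and drop the tree's
 * reference.  Called under mdsc->mutex.
 */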
static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	radix_tree_delete(&mdsc->request_tree, req->r_tid);
	ceph_mdsc_put_request(req);

	if (req->r_unsafe_dir) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);

		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
}

/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = req->r_direct_is_hash;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		inode = req->r_inode;
	} else if (req->r_dentry) {
		if (req->r_dentry->d_inode) {
			inode = req->r_dentry->d_inode;
		} else {
			inode = req->r_dentry->d_parent->d_inode;
			hash = req->r_dentry->d_name.hash;
			is_hash = true;
		}
	}
	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     (int)hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (%d/%d)\n",
				     inode, ceph_vinop(inode),
				     frag.frag, frag.mds,
				     (int)r, frag.ndist);
				return mds;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			mode = USE_AUTH_MDS;
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (auth)\n",
				     inode, ceph_vinop(inode), frag.frag, mds);
				return mds;
			}
		}
	}

	spin_lock(&inode->i_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&inode->i_lock);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&inode->i_lock);
	return mds;

random:
	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
	return mds;
}

/*
 * session messages
 */
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL);
	if (IS_ERR(msg)) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return ERR_PTR(PTR_ERR(msg));
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);
	return msg;
}

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;
	int err = 0;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
	if (IS_ERR(msg)) {
		err = PTR_ERR(msg);
		goto out;
	}
	ceph_con_send(&session->s_con, msg);

out:
	return err;
}

/*
 * session caps
 */

/*
 * Free preallocated cap messages assigned to this session
 */
static void cleanup_cap_releases(struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	spin_lock(&session->s_cap_lock);
	while (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
	}
	while (!list_empty(&session->s_cap_releases_done)) {
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
	}
	spin_unlock(&session->s_cap_lock);
}

/*
 * Helper to safely iterate over all caps associated with a session.
 *
 * caller must hold session s_mutex
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct ceph_cap *cap, *ncap;
	struct inode *inode;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	list_for_each_entry_safe(cap, ncap, &session->s_caps, session_caps) {
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode)
			continue;
		spin_unlock(&session->s_cap_lock);
		ret = cb(inode, cap, arg);
		iput(inode);
		if (ret < 0)
			return ret;
		spin_lock(&session->s_cap_lock);
	}
	spin_unlock(&session->s_cap_lock);
	return 0;
}

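/*
 * iterate_session_caps callback: drop a single cap from its inode.
 */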
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	ceph_remove_cap(cap);
	return 0;
}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, NULL);
	BUG_ON(session->s_nr_caps > 0);
	cleanup_cap_releases(session);
}

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	wake_up(&ci->i_cap_wq);
	if (arg) {
		spin_lock(&inode->i_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&inode->i_lock);
	}
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	session->s_renew_requested = jiffies;
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (IS_ERR(msg))
		return PTR_ERR(msg);
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && (session->s_cap_ttl == 0 ||
				 time_after_eq(jiffies, session->s_cap_ttl));

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, 0);
}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int err = 0;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (IS_ERR(msg))
		err = PTR_ERR(msg);
	else
		ceph_con_send(&session->s_con, msg);
	return err;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}

/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&inode->i_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used));
	if (ci->i_dirty_caps)
		goto out;	/* dirty caps */
	if ((used & ~oissued) & mine)
		goto out;	/* we need these caps */

	session->s_trim_caps--;
	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, NULL);
	} else {
		/* try to drop referring dentries */
		spin_unlock(&inode->i_lock);
		d_prune_aliases(inode);
		dout("trim_caps_cb %p cap %p pruned, count now %d\n",
		     inode, cap, atomic_read(&inode->i_count));
		return 0;
	}

out:
	spin_unlock(&inode->i_lock);
	return 0;
}

/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
	}
	return 0;
}

/*
 * Allocate cap_release messages.  If there is a partially full message
 * in the queue, try to allocate enough to cover its remainder, so that
 * we can send it immediately.
 *
 * Called under s_mutex.
 */
static int add_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session,
			    int extra)
{
	struct ceph_msg *msg;
	struct ceph_mds_cap_release *head;
	int err = -ENOMEM;

	if (extra < 0)
		extra = mdsc->client->mount_args->cap_release_safety;

	spin_lock(&session->s_cap_lock);

	if (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg,
				       list_head);
		head = msg->front.iov_base;
		extra += CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);
	}

	while (session->s_num_cap_releases < session->s_nr_caps + extra) {
		spin_unlock(&session->s_cap_lock);
		msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
				   0, 0, NULL);
		if (!msg)
			goto out_unlocked;
		dout("add_cap_releases %p msg %p now %d\n", session, msg,
		     (int)msg->front.iov_len);
		head = msg->front.iov_base;
		head->num = cpu_to_le32(0);
		msg->front.iov_len = sizeof(*head);
		spin_lock(&session->s_cap_lock);
		list_add(&msg->list_head, &session->s_cap_releases);
		session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
	}

	if (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg,
				       list_head);
		head = msg->front.iov_base;
		if (head->num) {
			dout(" queueing non-full %p (%d)\n", msg,
			     le32_to_cpu(head->num));
			list_move_tail(&msg->list_head,
				       &session->s_cap_releases_done);
			session->s_num_cap_releases -=
				CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);
		}
	}
	err = 0;
	spin_unlock(&session->s_cap_lock);
out_unlocked:
	return err;
}

/*
 * Check whether all dirty inode data has been flushed.
 *
 * returns true if we've flushed through want_flush_seq
 */
static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
{
	int mds, ret = 1;

	dout("check_cap_flush want %lld\n", want_flush_seq);
	mutex_lock(&mdsc->mutex);
	for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
		struct ceph_mds_session *session = mdsc->sessions[mds];

		if (!session)
			continue;
		get_session(session);
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&session->s_mutex);
		if (!list_empty(&session->s_cap_flushing)) {
			struct ceph_inode_info *ci =
				list_entry(session->s_cap_flushing.next,
					   struct ceph_inode_info,
					   i_flushing_item);
			struct inode *inode = &ci->vfs_inode;

			spin_lock(&inode->i_lock);
			if (ci->i_cap_flush_seq <= want_flush_seq) {
				dout("check_cap_flush still flushing %p "
				     "seq %lld <= %lld to mds%d\n", inode,
				     ci->i_cap_flush_seq, want_flush_seq,
				     session->s_mds);
				ret = 0;
			}
			spin_unlock(&inode->i_lock);
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);

		if (!ret)
			return ret;
		mutex_lock(&mdsc->mutex);
	}

	mutex_unlock(&mdsc->mutex);
	dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
	return ret;
}

/*
 * called under s_mutex
 */
static void send_cap_releases(struct ceph_mds_client *mdsc,
			      struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("send_cap_releases mds%d\n", session->s_mds);
	while (1) {
		spin_lock(&session->s_cap_lock);
		if (list_empty(&session->s_cap_releases_done))
			break;
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		spin_unlock(&session->s_cap_lock);
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	spin_unlock(&session->s_cap_lock);
}

/*
 * requests
 */

/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);

	if (!req)
		return ERR_PTR(-ENOMEM);

	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	req->r_fmode = -1;
	kref_init(&req->r_kref);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}

/*
 * return oldest (lowest) tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_request *first;

	if (radix_tree_gang_lookup(&mdsc->request_tree,
				   (void **)&first, 0, 1) <= 0)
		return 0;
	return first->r_tid;
}

/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
			   int stop_on_nosnap)
{
	struct dentry *temp;
	char *path;
	int len, pos;

	if (dentry == NULL)
		return ERR_PTR(-EINVAL);

retry:
	len = 0;
	for (temp = dentry; !IS_ROOT(temp);) {
		struct inode *inode = temp->d_inode;

		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
			len++;  /* slash only */
		else if (stop_on_nosnap && inode &&
			 ceph_snap(inode) == CEPH_NOSNAP)
			break;
		else
			len += 1 + temp->d_name.len;
		temp = temp->d_parent;
		if (temp == NULL) {
			pr_err("build_path_dentry corrupt dentry %p\n", dentry);
			return ERR_PTR(-EINVAL);
		}
	}
	if (len)
		len--;  /* no leading '/' */

	path = kmalloc(len+1, GFP_NOFS);
	if (path == NULL)
		return ERR_PTR(-ENOMEM);
	pos = len;
	path[pos] = 0;	/* trailing null */
	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
		struct inode *inode = temp->d_inode;

		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path_dentry path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			break;
		} else {
			pos -= temp->d_name.len;
			if (pos < 0)
				break;
			strncpy(path + pos, temp->d_name.name,
				temp->d_name.len);
			dout("build_path_dentry path+%d: %p '%.*s'\n",
			     pos, temp, temp->d_name.len, path + pos);
		}
		if (pos)
			path[--pos] = '/';
		temp = temp->d_parent;
		if (temp == NULL) {
			pr_err("build_path_dentry corrupt dentry\n");
			kfree(path);
			return ERR_PTR(-EINVAL);
		}
	}
	if (pos != 0) {
		pr_err("build_path_dentry did not end path lookup where "
		       "expected, namelen is %d, pos is %d\n", len, pos);
		/* presumably this is only possible if racing with a
		   rename of one of the parent directories (we can not
		   lock the dentries above us to prevent this, but
		   retrying should be harmless) */
		kfree(path);
		goto retry;
	}

	*base = ceph_ino(temp->d_inode);
	*plen = len;
	dout("build_path_dentry on %p %d built %llx '%.*s'\n",
	     dentry, atomic_read(&dentry->d_count), *base, len, path);
	return path;
}

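/*
 * Build the path arguments for a request from a dentry.  If the parent
 * is not snapped we can send just the parent ino + name; otherwise fall
 * back to a full path from ceph_mdsc_build_path().
 */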
static int build_dentry_path(struct dentry *dentry,
			     const char **ppath, int *ppathlen, u64 *pino,
			     int *pfreepath)
{
	char *path;

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(dentry->d_parent->d_inode);
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
		return 0;
	}
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}

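/*
 * Build the path arguments for a request from an inode.  A non-snapped
 * inode can be addressed by ino alone; otherwise build a path from one
 * of its aliases.
 */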
static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    int *pfreepath)
{
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}

/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 const char *rpath, u64 rino,
				 const char **ppath, int *pathlen,
				 u64 *ino, int *freepath)
{
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = strlen(rpath);
		dout(" path %.*s\n", *pathlen, rpath);
	}

	return r;
}

/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
					       struct ceph_mds_request *req,
					       int mds)
{
	struct ceph_msg *msg;
	struct ceph_mds_request_head *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	int freepath1 = 0, freepath2 = 0;
	int len;
	u16 releases;
	void *p, *end;
	int ret;

	ret = set_request_path_attr(req->r_inode, req->r_dentry,
				    req->r_path1, req->r_ino1.ino,
				    &path1, &pathlen1, &ino1, &freepath1);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out;
	}
	ret = set_request_path_attr(NULL, req->r_old_dentry,
				    req->r_path2, req->r_ino2.ino,
				    &path2, &pathlen2, &ino2, &freepath2);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out_free1;
	}

	len = sizeof(*head) +
		pathlen1 + pathlen2 + 2*(sizeof(u32) + sizeof(u64));

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
	if (req->r_dentry_drop)
		len += req->r_dentry->d_name.len;
	if (req->r_old_dentry_drop)
		len += req->r_old_dentry->d_name.len;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL);
	if (IS_ERR(msg))
		goto out_free2;

	head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(*head);
	end = msg->front.iov_base + msg->front.iov_len;

	head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	head->op = cpu_to_le32(req->r_op);
	head->caller_uid = cpu_to_le32(current_fsuid());
	head->caller_gid = cpu_to_le32(current_fsgid());
	head->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	/* cap releases */
	releases = 0;
	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_inode ? req->r_inode : req->r_dentry->d_inode,
		      mds, req->r_inode_drop, req->r_inode_unless, 0);
	if (req->r_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_dentry,
		       mds, req->r_dentry_drop, req->r_dentry_unless);
	if (req->r_old_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
		       mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_old_dentry->d_inode,
		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
	head->num_releases = cpu_to_le16(releases);

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	msg->pages = req->r_pages;
	msg->nr_pages = req->r_num_pages;
	msg->hdr.data_len = cpu_to_le32(req->r_data_len);
	msg->hdr.data_off = cpu_to_le16(0);

out_free2:
	if (freepath2)
		kfree((char *)path2);
out_free1:
	if (freepath1)
		kfree((char *)path1);
out:
	return msg;
}

/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	if (req->r_callback)
		req->r_callback(mdsc, req);
	else
		complete(&req->r_completion);
}

/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req,
				  int mds)
{
	struct ceph_mds_request_head *rhead;
	struct ceph_msg *msg;
	int flags = 0;

	req->r_mds = mds;
	req->r_attempts++;
	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(mdsc, req, mds);
	if (IS_ERR(msg)) {
		req->r_reply = ERR_PTR(PTR_ERR(msg));
		complete_request(mdsc, req);
		return -PTR_ERR(msg);
	}
	req->r_request = msg;

	rhead = msg->front.iov_base;
	rhead->tid = cpu_to_le64(req->r_tid);
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (req->r_got_unsafe)
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (req->r_locked_dir)
		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;

	dout(" r_locked_dir = %p\n", req->r_locked_dir);

	if (req->r_target_inode && req->r_got_unsafe)
		rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
	else
		rhead->ino = 0;
	return 0;
}

/*
 * send request, or put it on the appropriate wait list.
 */
static int __do_request(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = NULL;
	int mds = -1;
	int err = -EAGAIN;

	if (req->r_reply)
		goto out;

	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");
		err = -EIO;
		goto finish;
	}

	mds = __choose_mds(mdsc, req);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
		goto out;
	}

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
	if (!session)
		session = register_session(mdsc, mds);
	dout("do_request mds%d session %p state %s\n", mds, session,
	     session_state_name(session->s_state));
	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		list_add(&req->r_wait, &session->s_waiting);
		goto out_session;
	}

	/* send request */
	req->r_session = get_session(session);
	req->r_resend_mds = -1;   /* forget any previous mds hint */

	if (req->r_request_started == 0)   /* note request start time */
		req->r_request_started = jiffies;

	err = __prepare_send_request(mdsc, req, mds);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
	}

out_session:
	ceph_put_mds_session(session);
out:
	return err;

finish:
	req->r_reply = ERR_PTR(err);
	complete_request(mdsc, req);
	goto out;
}

/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
{
	struct ceph_mds_request *req, *nreq;

	list_for_each_entry_safe(req, nreq, head, r_wait) {
		list_del_init(&req->r_wait);
		__do_request(mdsc, req);
	}
}

  1353. /*
  1354. * Wake up threads with requests pending for @mds, so that they can
  1355. * resubmit their requests to a possibly different mds. If @all is set,
* wake up if their requests have been forwarded to @mds, too.
  1357. */
  1358. static void kick_requests(struct ceph_mds_client *mdsc, int mds, int all)
  1359. {
  1360. struct ceph_mds_request *reqs[10];
  1361. u64 nexttid = 0;
  1362. int i, got;
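/* walk the whole request tree in tid order, ten requests at a time */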
  1363. dout("kick_requests mds%d\n", mds);
  1364. while (nexttid <= mdsc->last_tid) {
  1365. got = radix_tree_gang_lookup(&mdsc->request_tree,
  1366. (void **)&reqs, nexttid, 10);
  1367. if (got == 0)
  1368. break;
  1369. nexttid = reqs[got-1]->r_tid + 1;
  1370. for (i = 0; i < got; i++) {
  1371. if (reqs[i]->r_got_unsafe)
  1372. continue;
  1373. if (reqs[i]->r_session &&
  1374. reqs[i]->r_session->s_mds == mds) {
  1375. dout(" kicking tid %llu\n", reqs[i]->r_tid);
  1376. put_request_session(reqs[i]);
  1377. __do_request(mdsc, reqs[i]);
  1378. }
  1379. }
  1380. }
  1381. }
  1382. void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
  1383. struct ceph_mds_request *req)
  1384. {
  1385. dout("submit_request on %p\n", req);
  1386. mutex_lock(&mdsc->mutex);
  1387. __register_request(mdsc, req, NULL);
  1388. __do_request(mdsc, req);
  1389. mutex_unlock(&mdsc->mutex);
  1390. }
  1391. /*
* Synchronously perform an mds request. Take care of all of the
  1393. * session setup, forwarding, retry details.
  1394. */
  1395. int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
  1396. struct inode *dir,
  1397. struct ceph_mds_request *req)
  1398. {
  1399. int err;
  1400. dout("do_request on %p\n", req);
  1401. /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
  1402. if (req->r_inode)
  1403. ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
  1404. if (req->r_locked_dir)
  1405. ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
  1406. if (req->r_old_dentry)
  1407. ceph_get_cap_refs(
  1408. ceph_inode(req->r_old_dentry->d_parent->d_inode),
  1409. CEPH_CAP_PIN);
  1410. /* issue */
  1411. mutex_lock(&mdsc->mutex);
  1412. __register_request(mdsc, req, dir);
  1413. __do_request(mdsc, req);
  1414. /* wait */
  1415. if (!req->r_reply) {
  1416. mutex_unlock(&mdsc->mutex);
  1417. if (req->r_timeout) {
  1418. err = wait_for_completion_timeout(&req->r_completion,
  1419. req->r_timeout);
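/*
* wait_for_completion_timeout() returns the remaining time (> 0)
* if the request completed, or 0 if we timed out.
*/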
  1420. if (err > 0)
  1421. err = 0;
  1422. else if (err == 0)
  1423. req->r_reply = ERR_PTR(-EIO);
  1424. } else {
  1425. wait_for_completion(&req->r_completion);
  1426. }
  1427. mutex_lock(&mdsc->mutex);
  1428. }
  1429. if (IS_ERR(req->r_reply)) {
  1430. err = PTR_ERR(req->r_reply);
  1431. req->r_reply = NULL;
  1432. /* clean up */
  1433. __unregister_request(mdsc, req);
  1434. if (!list_empty(&req->r_unsafe_item))
  1435. list_del_init(&req->r_unsafe_item);
  1436. complete(&req->r_safe_completion);
  1437. } else if (req->r_err) {
  1438. err = req->r_err;
  1439. } else {
  1440. err = le32_to_cpu(req->r_reply_info.head->result);
  1441. }
  1442. mutex_unlock(&mdsc->mutex);
  1443. dout("do_request %p done, result %d\n", req, err);
  1444. return err;
  1445. }
  1446. /*
  1447. * Handle mds reply.
  1448. *
  1449. * We take the session mutex and parse and process the reply immediately.
  1450. * This preserves the logical ordering of replies, capabilities, etc., sent
  1451. * by the MDS as they are applied to our local cache.
  1452. */
  1453. static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
  1454. {
  1455. struct ceph_mds_client *mdsc = session->s_mdsc;
  1456. struct ceph_mds_request *req;
  1457. struct ceph_mds_reply_head *head = msg->front.iov_base;
  1458. struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */
  1459. u64 tid;
  1460. int err, result;
  1461. int mds;
  1462. if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
  1463. return;
  1464. if (msg->front.iov_len < sizeof(*head)) {
  1465. pr_err("mdsc_handle_reply got corrupt (short) reply\n");
  1466. ceph_msg_dump(msg);
  1467. return;
  1468. }
  1469. /* get request, session */
  1470. tid = le64_to_cpu(head->tid);
  1471. mutex_lock(&mdsc->mutex);
  1472. req = __lookup_request(mdsc, tid);
  1473. if (!req) {
  1474. dout("handle_reply on unknown tid %llu\n", tid);
  1475. mutex_unlock(&mdsc->mutex);
  1476. return;
  1477. }
  1478. dout("handle_reply %p\n", req);
  1479. mds = le64_to_cpu(msg->hdr.src.name.num);
  1480. /* correct session? */
if (req->r_session != session) {
  1482. pr_err("mdsc_handle_reply got %llu on session mds%d"
  1483. " not mds%d\n", tid, session->s_mds,
  1484. req->r_session ? req->r_session->s_mds : -1);
  1485. mutex_unlock(&mdsc->mutex);
  1486. goto out;
  1487. }
  1488. /* dup? */
  1489. if ((req->r_got_unsafe && !head->safe) ||
  1490. (req->r_got_safe && head->safe)) {
  1491. pr_warning("got a dup %s reply on %llu from mds%d\n",
  1492. head->safe ? "safe" : "unsafe", tid, mds);
  1493. mutex_unlock(&mdsc->mutex);
  1494. goto out;
  1495. }
  1496. result = le32_to_cpu(head->result);
  1497. /*
  1498. * Tolerate 2 consecutive ESTALEs from the same mds.
  1499. * FIXME: we should be looking at the cap migrate_seq.
  1500. */
  1501. if (result == -ESTALE) {
  1502. req->r_direct_mode = USE_AUTH_MDS;
  1503. req->r_num_stale++;
  1504. if (req->r_num_stale <= 2) {
  1505. __do_request(mdsc, req);
  1506. mutex_unlock(&mdsc->mutex);
  1507. goto out;
  1508. }
  1509. } else {
  1510. req->r_num_stale = 0;
  1511. }
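/*
* A modifying op gets an "unsafe" reply once the mds has applied it
* in memory and a "safe" reply once it has been journaled; both carry
* the same tid. Read-only ops get a single safe reply.
*/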
  1512. if (head->safe) {
  1513. req->r_got_safe = true;
  1514. __unregister_request(mdsc, req);
  1515. complete(&req->r_safe_completion);
  1516. if (req->r_got_unsafe) {
  1517. /*
  1518. * We already handled the unsafe response, now do the
  1519. * cleanup. No need to examine the response; the MDS
  1520. * doesn't include any result info in the safe
  1521. * response. And even if it did, there is nothing
  1522. * useful we could do with a revised return value.
  1523. */
  1524. dout("got safe reply %llu, mds%d\n", tid, mds);
  1525. list_del_init(&req->r_unsafe_item);
  1526. /* last unsafe request during umount? */
  1527. if (mdsc->stopping && !__get_oldest_tid(mdsc))
  1528. complete(&mdsc->safe_umount_waiters);
  1529. mutex_unlock(&mdsc->mutex);
  1530. goto out;
  1531. }
  1532. }
  1533. BUG_ON(req->r_reply);
  1534. if (!head->safe) {
  1535. req->r_got_unsafe = true;
  1536. list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
  1537. }
  1538. dout("handle_reply tid %lld result %d\n", tid, result);
  1539. rinfo = &req->r_reply_info;
  1540. err = parse_reply_info(msg, rinfo);
  1541. mutex_unlock(&mdsc->mutex);
  1542. mutex_lock(&session->s_mutex);
  1543. if (err < 0) {
  1544. pr_err("mdsc_handle_reply got corrupt reply mds%d\n", mds);
  1545. ceph_msg_dump(msg);
  1546. goto out_err;
  1547. }
  1548. /* snap trace */
  1549. if (rinfo->snapblob_len) {
  1550. down_write(&mdsc->snap_rwsem);
  1551. ceph_update_snap_trace(mdsc, rinfo->snapblob,
  1552. rinfo->snapblob + rinfo->snapblob_len,
  1553. le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
  1554. downgrade_write(&mdsc->snap_rwsem);
  1555. } else {
  1556. down_read(&mdsc->snap_rwsem);
  1557. }
  1558. /* insert trace into our cache */
  1559. err = ceph_fill_trace(mdsc->client->sb, req, req->r_session);
  1560. if (err == 0) {
  1561. if (result == 0 && rinfo->dir_nr)
  1562. ceph_readdir_prepopulate(req, req->r_session);
  1563. ceph_unreserve_caps(&req->r_caps_reservation);
  1564. }
  1565. up_read(&mdsc->snap_rwsem);
  1566. out_err:
  1567. if (err) {
  1568. req->r_err = err;
  1569. } else {
  1570. req->r_reply = msg;
  1571. ceph_msg_get(msg);
  1572. }
  1573. add_cap_releases(mdsc, req->r_session, -1);
  1574. mutex_unlock(&session->s_mutex);
  1575. /* kick calling process */
  1576. complete_request(mdsc, req);
  1577. out:
  1578. ceph_mdsc_put_request(req);
  1579. return;
  1580. }
  1581. /*
  1582. * handle mds notification that our request has been forwarded.
  1583. */
  1584. static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
  1585. {
  1586. struct ceph_mds_request *req;
  1587. u64 tid;
  1588. u32 next_mds;
  1589. u32 fwd_seq;
  1590. u8 must_resend;
  1591. int err = -EINVAL;
  1592. void *p = msg->front.iov_base;
  1593. void *end = p + msg->front.iov_len;
int from_mds;
  1595. if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
  1596. goto bad;
  1597. from_mds = le64_to_cpu(msg->hdr.src.name.num);
  1598. ceph_decode_need(&p, end, sizeof(u64)+2*sizeof(u32), bad);
  1599. tid = ceph_decode_64(&p);
  1600. next_mds = ceph_decode_32(&p);
  1601. fwd_seq = ceph_decode_32(&p);
  1602. must_resend = ceph_decode_8(&p);
  1603. WARN_ON(must_resend); /* shouldn't happen. */
  1604. mutex_lock(&mdsc->mutex);
  1605. req = __lookup_request(mdsc, tid);
  1606. if (!req) {
  1607. dout("forward %llu dne\n", tid);
  1608. goto out; /* dup reply? */
  1609. }
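/*
* the mds stamps each forward notification with an increasing seq;
* if we already acted on this (or a newer) forward, ignore it.
*/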
  1611. if (fwd_seq <= req->r_num_fwd) {
  1612. dout("forward %llu to mds%d - old seq %d <= %d\n",
  1613. tid, next_mds, req->r_num_fwd, fwd_seq);
  1614. } else {
  1615. /* resend. forward race not possible; mds would drop */
  1616. dout("forward %llu to mds%d (we resend)\n", tid, next_mds);
  1617. req->r_num_fwd = fwd_seq;
  1618. req->r_resend_mds = next_mds;
  1619. put_request_session(req);
  1620. __do_request(mdsc, req);
  1621. }
  1622. ceph_mdsc_put_request(req);
  1623. out:
  1624. mutex_unlock(&mdsc->mutex);
  1625. return;
  1626. bad:
  1627. pr_err("mdsc_handle_forward decode error err=%d\n", err);
  1628. }
  1629. /*
  1630. * handle a mds session control message
  1631. */
  1632. static void handle_session(struct ceph_mds_session *session,
  1633. struct ceph_msg *msg)
  1634. {
  1635. struct ceph_mds_client *mdsc = session->s_mdsc;
  1636. u32 op;
  1637. u64 seq;
  1638. int mds;
  1639. struct ceph_mds_session_head *h = msg->front.iov_base;
  1640. int wake = 0;
  1641. if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
  1642. return;
  1643. mds = le64_to_cpu(msg->hdr.src.name.num);
  1644. /* decode */
  1645. if (msg->front.iov_len != sizeof(*h))
  1646. goto bad;
  1647. op = le32_to_cpu(h->op);
  1648. seq = le64_to_cpu(h->seq);
  1649. mutex_lock(&mdsc->mutex);
  1650. /* FIXME: this ttl calculation is generous */
  1651. session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
  1652. mutex_unlock(&mdsc->mutex);
  1653. mutex_lock(&session->s_mutex);
  1654. dout("handle_session mds%d %s %p state %s seq %llu\n",
  1655. mds, ceph_session_op_name(op), session,
  1656. session_state_name(session->s_state), seq);
  1657. if (session->s_state == CEPH_MDS_SESSION_HUNG) {
  1658. session->s_state = CEPH_MDS_SESSION_OPEN;
  1659. pr_info("mds%d came back\n", session->s_mds);
  1660. }
  1661. switch (op) {
  1662. case CEPH_SESSION_OPEN:
  1663. session->s_state = CEPH_MDS_SESSION_OPEN;
  1664. renewed_caps(mdsc, session, 0);
  1665. wake = 1;
  1666. if (mdsc->stopping)
  1667. __close_session(mdsc, session);
  1668. break;
  1669. case CEPH_SESSION_RENEWCAPS:
  1670. if (session->s_renew_seq == seq)
  1671. renewed_caps(mdsc, session, 1);
  1672. break;
  1673. case CEPH_SESSION_CLOSE:
  1674. unregister_session(mdsc, session);
  1675. remove_session_caps(session);
  1676. wake = 1; /* for good measure */
  1677. complete(&mdsc->session_close_waiters);
  1678. kick_requests(mdsc, mds, 0); /* cur only */
  1679. break;
  1680. case CEPH_SESSION_STALE:
  1681. pr_info("mds%d caps went stale, renewing\n",
  1682. session->s_mds);
  1683. spin_lock(&session->s_cap_lock);
  1684. session->s_cap_gen++;
  1685. session->s_cap_ttl = 0;
  1686. spin_unlock(&session->s_cap_lock);
  1687. send_renew_caps(mdsc, session);
  1688. break;
  1689. case CEPH_SESSION_RECALL_STATE:
  1690. trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
  1691. break;
  1692. default:
  1693. pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
  1694. WARN_ON(1);
  1695. }
  1696. mutex_unlock(&session->s_mutex);
  1697. if (wake) {
  1698. mutex_lock(&mdsc->mutex);
  1699. __wake_requests(mdsc, &session->s_waiting);
  1700. mutex_unlock(&mdsc->mutex);
  1701. }
  1702. return;
  1703. bad:
  1704. pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
  1705. (int)msg->front.iov_len);
  1706. ceph_msg_dump(msg);
  1707. return;
  1708. }
  1709. /*
  1710. * called under session->mutex.
  1711. */
  1712. static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
  1713. struct ceph_mds_session *session)
  1714. {
  1715. struct ceph_mds_request *req, *nreq;
  1716. int err;
  1717. dout("replay_unsafe_requests mds%d\n", session->s_mds);
  1718. mutex_lock(&mdsc->mutex);
  1719. list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
  1720. err = __prepare_send_request(mdsc, req, session->s_mds);
  1721. if (!err) {
  1722. ceph_msg_get(req->r_request);
  1723. ceph_con_send(&session->s_con, req->r_request);
  1724. }
  1725. }
  1726. mutex_unlock(&mdsc->mutex);
  1727. }
  1728. /*
  1729. * Encode information about a cap for a reconnect with the MDS.
  1730. */
  1731. struct encode_caps_data {
  1732. void **pp;
  1733. void *end;
  1734. int *num_caps;
  1735. };
  1736. static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
  1737. void *arg)
  1738. {
  1739. struct ceph_mds_cap_reconnect *rec;
  1740. struct ceph_inode_info *ci;
  1741. struct encode_caps_data *data = (struct encode_caps_data *)arg;
  1742. void *p = *(data->pp);
  1743. void *end = data->end;
  1744. char *path;
  1745. int pathlen, err;
  1746. u64 pathbase;
  1747. struct dentry *dentry;
  1748. ci = cap->ci;
  1749. dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
  1750. inode, ceph_vinop(inode), cap, cap->cap_id,
  1751. ceph_cap_string(cap->issued));
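/*
* ceph_decode_need() doubles as a bounds check on the encode buffer
* here: if fewer than the requested bytes remain before 'end', bail
* out to needmore and return -ENOSPC so the caller can retry with a
* bigger message.
*/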
  1752. ceph_decode_need(&p, end, sizeof(u64), needmore);
  1753. ceph_encode_64(&p, ceph_ino(inode));
  1754. dentry = d_find_alias(inode);
  1755. if (dentry) {
  1756. path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
  1757. if (IS_ERR(path)) {
  1758. err = PTR_ERR(path);
  1759. BUG_ON(err);
  1760. }
} else {
path = NULL;
pathlen = 0;
pathbase = 0;
}
  1765. ceph_decode_need(&p, end, pathlen+4, needmore);
  1766. ceph_encode_string(&p, end, path, pathlen);
  1767. ceph_decode_need(&p, end, sizeof(*rec), needmore);
  1768. rec = p;
  1769. p += sizeof(*rec);
  1770. BUG_ON(p > end);
  1771. spin_lock(&inode->i_lock);
  1772. cap->seq = 0; /* reset cap seq */
  1773. cap->issue_seq = 0; /* and issue_seq */
  1774. rec->cap_id = cpu_to_le64(cap->cap_id);
  1775. rec->pathbase = cpu_to_le64(pathbase);
  1776. rec->wanted = cpu_to_le32(__ceph_caps_wanted(ci));
  1777. rec->issued = cpu_to_le32(cap->issued);
  1778. rec->size = cpu_to_le64(inode->i_size);
  1779. ceph_encode_timespec(&rec->mtime, &inode->i_mtime);
  1780. ceph_encode_timespec(&rec->atime, &inode->i_atime);
  1781. rec->snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
  1782. spin_unlock(&inode->i_lock);
  1783. kfree(path);
  1784. dput(dentry);
  1785. (*data->num_caps)++;
  1786. *(data->pp) = p;
  1787. return 0;
  1788. needmore:
  1789. return -ENOSPC;
  1790. }
  1791. /*
  1792. * If an MDS fails and recovers, clients need to reconnect in order to
  1793. * reestablish shared state. This includes all caps issued through
  1794. * this session _and_ the snap_realm hierarchy. Because it's not
  1795. * clear which snap realms the mds cares about, we send everything we
* know about, which ensures we'll then get any new info the
  1797. * recovering MDS might have.
  1798. *
  1799. * This is a relatively heavyweight operation, but it's rare.
  1800. *
  1801. * called with mdsc->mutex held.
  1802. */
  1803. static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
  1804. {
  1805. struct ceph_mds_session *session;
  1806. struct ceph_msg *reply;
  1807. int newlen, len = 4 + 1;
  1808. void *p, *end;
  1809. int err;
  1810. int num_caps, num_realms = 0;
  1811. int got;
  1812. u64 next_snap_ino = 0;
  1813. __le32 *pnum_caps, *pnum_realms;
  1814. struct encode_caps_data iter_args;
  1815. pr_info("reconnect to recovering mds%d\n", mds);
  1816. /* find session */
  1817. session = __ceph_lookup_mds_session(mdsc, mds);
  1818. mutex_unlock(&mdsc->mutex); /* drop lock for duration */
  1819. if (session) {
  1820. mutex_lock(&session->s_mutex);
  1821. session->s_state = CEPH_MDS_SESSION_RECONNECTING;
  1822. session->s_seq = 0;
  1823. ceph_con_open(&session->s_con,
  1824. ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
  1825. /* replay unsafe requests */
  1826. replay_unsafe_requests(mdsc, session);
  1827. /* estimate needed space */
  1828. len += session->s_nr_caps *
  1829. (100+sizeof(struct ceph_mds_cap_reconnect));
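/*
* the 100 bytes per cap is a guess at the average path length; if the
* buffer still turns out to be too small, the needmore path below
* grows it and we retry.
*/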
  1830. pr_info("estimating i need %d bytes for %d caps\n",
  1831. len, session->s_nr_caps);
  1832. } else {
  1833. dout("no session for mds%d, will send short reconnect\n",
  1834. mds);
  1835. }
  1836. down_read(&mdsc->snap_rwsem);
  1837. retry:
  1838. /* build reply */
  1839. reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, len, 0, 0, NULL);
  1840. if (IS_ERR(reply)) {
  1841. err = PTR_ERR(reply);
  1842. pr_err("send_mds_reconnect ENOMEM on %d for mds%d\n",
  1843. len, mds);
  1844. goto out;
  1845. }
  1846. p = reply->front.iov_base;
  1847. end = p + len;
  1848. if (!session) {
  1849. ceph_encode_8(&p, 1); /* session was closed */
  1850. ceph_encode_32(&p, 0);
  1851. goto send;
  1852. }
  1853. dout("session %p state %s\n", session,
  1854. session_state_name(session->s_state));
  1855. /* traverse this session's caps */
  1856. ceph_encode_8(&p, 0);
  1857. pnum_caps = p;
  1858. ceph_encode_32(&p, session->s_nr_caps);
  1859. num_caps = 0;
  1860. iter_args.pp = &p;
  1861. iter_args.end = end;
  1862. iter_args.num_caps = &num_caps;
  1863. err = iterate_session_caps(session, encode_caps_cb, &iter_args);
  1864. if (err == -ENOSPC)
  1865. goto needmore;
  1866. if (err < 0)
  1867. goto out;
  1868. *pnum_caps = cpu_to_le32(num_caps);
  1869. /*
  1870. * snaprealms. we provide mds with the ino, seq (version), and
  1871. * parent for all of our realms. If the mds has any newer info,
  1872. * it will tell us.
  1873. */
  1874. next_snap_ino = 0;
  1875. /* save some space for the snaprealm count */
  1876. pnum_realms = p;
  1877. ceph_decode_need(&p, end, sizeof(*pnum_realms), needmore);
  1878. p += sizeof(*pnum_realms);
  1879. num_realms = 0;
  1880. while (1) {
  1881. struct ceph_snap_realm *realm;
  1882. struct ceph_mds_snaprealm_reconnect *sr_rec;
  1883. got = radix_tree_gang_lookup(&mdsc->snap_realms,
  1884. (void **)&realm, next_snap_ino, 1);
  1885. if (!got)
  1886. break;
  1887. dout(" adding snap realm %llx seq %lld parent %llx\n",
  1888. realm->ino, realm->seq, realm->parent_ino);
  1889. ceph_decode_need(&p, end, sizeof(*sr_rec), needmore);
  1890. sr_rec = p;
  1891. sr_rec->ino = cpu_to_le64(realm->ino);
  1892. sr_rec->seq = cpu_to_le64(realm->seq);
  1893. sr_rec->parent = cpu_to_le64(realm->parent_ino);
  1894. p += sizeof(*sr_rec);
  1895. num_realms++;
  1896. next_snap_ino = realm->ino + 1;
  1897. }
  1898. *pnum_realms = cpu_to_le32(num_realms);
  1899. send:
  1900. reply->front.iov_len = p - reply->front.iov_base;
  1901. reply->hdr.front_len = cpu_to_le32(reply->front.iov_len);
  1902. dout("final len was %u (guessed %d)\n",
  1903. (unsigned)reply->front.iov_len, len);
  1904. ceph_con_send(&session->s_con, reply);
  1905. if (session) {
  1906. session->s_state = CEPH_MDS_SESSION_OPEN;
  1907. __wake_requests(mdsc, &session->s_waiting);
  1908. }
  1909. out:
  1910. up_read(&mdsc->snap_rwsem);
  1911. if (session) {
  1912. mutex_unlock(&session->s_mutex);
  1913. ceph_put_mds_session(session);
  1914. }
  1915. mutex_lock(&mdsc->mutex);
  1916. return;
  1917. needmore:
  1918. /*
  1919. * we need a larger buffer. this doesn't very accurately
  1920. * factor in snap realms, but it's safe.
  1921. */
  1922. num_caps += num_realms;
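/*
* grow the buffer by the ratio of total caps (plus a little slop) to
* caps we managed to encode (plus one, to avoid dividing by zero).
* e.g. if only 50 of 200 caps fit, len roughly quadruples.
*/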
  1923. newlen = len * ((100 * (session->s_nr_caps+3)) / (num_caps + 1)) / 100;
  1924. pr_info("i guessed %d, and did %d of %d caps, retrying with %d\n",
  1925. len, num_caps, session->s_nr_caps, newlen);
  1926. len = newlen;
  1927. ceph_msg_put(reply);
  1928. goto retry;
  1929. }
  1930. /*
  1931. * compare old and new mdsmaps, kicking requests
  1932. * and closing out old connections as necessary
  1933. *
  1934. * called under mdsc->mutex.
  1935. */
  1936. static void check_new_map(struct ceph_mds_client *mdsc,
  1937. struct ceph_mdsmap *newmap,
  1938. struct ceph_mdsmap *oldmap)
  1939. {
  1940. int i;
  1941. int oldstate, newstate;
  1942. struct ceph_mds_session *s;
  1943. dout("check_new_map new %u old %u\n",
  1944. newmap->m_epoch, oldmap->m_epoch);
  1945. for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
  1946. if (mdsc->sessions[i] == NULL)
  1947. continue;
  1948. s = mdsc->sessions[i];
  1949. oldstate = ceph_mdsmap_get_state(oldmap, i);
  1950. newstate = ceph_mdsmap_get_state(newmap, i);
  1951. dout("check_new_map mds%d state %s -> %s (session %s)\n",
  1952. i, ceph_mds_state_name(oldstate),
  1953. ceph_mds_state_name(newstate),
  1954. session_state_name(s->s_state));
  1955. if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
  1956. ceph_mdsmap_get_addr(newmap, i),
  1957. sizeof(struct ceph_entity_addr))) {
  1958. if (s->s_state == CEPH_MDS_SESSION_OPENING) {
  1959. /* the session never opened, just close it
  1960. * out now */
  1961. __wake_requests(mdsc, &s->s_waiting);
  1962. unregister_session(mdsc, s);
  1963. } else {
  1964. /* just close it */
  1965. mutex_unlock(&mdsc->mutex);
  1966. mutex_lock(&s->s_mutex);
  1967. mutex_lock(&mdsc->mutex);
  1968. ceph_con_close(&s->s_con);
  1969. mutex_unlock(&s->s_mutex);
  1970. s->s_state = CEPH_MDS_SESSION_RESTARTING;
  1971. }
  1972. /* kick any requests waiting on the recovering mds */
  1973. kick_requests(mdsc, i, 1);
  1974. } else if (oldstate == newstate) {
  1975. continue; /* nothing new with this mds */
  1976. }
  1977. /*
  1978. * send reconnect?
  1979. */
  1980. if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
  1981. newstate >= CEPH_MDS_STATE_RECONNECT)
  1982. send_mds_reconnect(mdsc, i);
  1983. /*
  1984. * kick requests on any mds that has gone active.
  1985. *
  1986. * kick requests on cur or forwarder: we may have sent
  1987. * the request to mds1, mds1 told us it forwarded it
  1988. * to mds2, but then we learn mds1 failed and can't be
  1989. * sure it successfully forwarded our request before
  1990. * it died.
  1991. */
  1992. if (oldstate < CEPH_MDS_STATE_ACTIVE &&
  1993. newstate >= CEPH_MDS_STATE_ACTIVE) {
  1994. pr_info("mds%d reconnect completed\n", s->s_mds);
  1995. kick_requests(mdsc, i, 1);
  1996. ceph_kick_flushing_caps(mdsc, s);
  1997. wake_up_session_caps(s, 1);
  1998. }
  1999. }
  2000. }
  2001. /*
  2002. * leases
  2003. */
  2004. /*
  2005. * caller must hold session s_mutex, dentry->d_lock
  2006. */
  2007. void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
  2008. {
  2009. struct ceph_dentry_info *di = ceph_dentry(dentry);
  2010. ceph_put_mds_session(di->lease_session);
  2011. di->lease_session = NULL;
  2012. }
  2013. static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
  2014. {
  2015. struct super_block *sb = mdsc->client->sb;
  2016. struct inode *inode;
  2017. struct ceph_mds_session *session;
  2018. struct ceph_inode_info *ci;
  2019. struct dentry *parent, *dentry;
  2020. struct ceph_dentry_info *di;
  2021. int mds;
  2022. struct ceph_mds_lease *h = msg->front.iov_base;
  2023. struct ceph_vino vino;
  2024. int mask;
  2025. struct qstr dname;
  2026. int release = 0;
  2027. if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
  2028. return;
  2029. mds = le64_to_cpu(msg->hdr.src.name.num);
  2030. dout("handle_lease from mds%d\n", mds);
  2031. /* decode */
  2032. if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
  2033. goto bad;
  2034. vino.ino = le64_to_cpu(h->ino);
  2035. vino.snap = CEPH_NOSNAP;
  2036. mask = le16_to_cpu(h->mask);
  2037. dname.name = (void *)h + sizeof(*h) + sizeof(u32);
  2038. dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
  2039. if (dname.len != get_unaligned_le32(h+1))
  2040. goto bad;
  2041. /* find session */
  2042. mutex_lock(&mdsc->mutex);
  2043. session = __ceph_lookup_mds_session(mdsc, mds);
  2044. mutex_unlock(&mdsc->mutex);
  2045. if (!session) {
  2046. pr_err("handle_lease got lease but no session mds%d\n", mds);
  2047. return;
  2048. }
  2049. mutex_lock(&session->s_mutex);
  2050. session->s_seq++;
  2051. /* lookup inode */
  2052. inode = ceph_find_inode(sb, vino);
  2053. dout("handle_lease '%s', mask %d, ino %llx %p\n",
  2054. ceph_lease_op_name(h->action), mask, vino.ino, inode);
  2055. if (inode == NULL) {
  2056. dout("handle_lease no inode %llx\n", vino.ino);
  2057. goto release;
  2058. }
  2059. ci = ceph_inode(inode);
  2060. /* dentry */
  2061. parent = d_find_alias(inode);
  2062. if (!parent) {
  2063. dout("no parent dentry on inode %p\n", inode);
  2064. WARN_ON(1);
  2065. goto release; /* hrm... */
  2066. }
  2067. dname.hash = full_name_hash(dname.name, dname.len);
  2068. dentry = d_lookup(parent, &dname);
  2069. dput(parent);
  2070. if (!dentry)
  2071. goto release;
  2072. spin_lock(&dentry->d_lock);
  2073. di = ceph_dentry(dentry);
  2074. switch (h->action) {
  2075. case CEPH_MDS_LEASE_REVOKE:
  2076. if (di && di->lease_session == session) {
  2077. h->seq = cpu_to_le32(di->lease_seq);
  2078. __ceph_mdsc_drop_dentry_lease(dentry);
  2079. }
  2080. release = 1;
  2081. break;
  2082. case CEPH_MDS_LEASE_RENEW:
  2083. if (di && di->lease_session == session &&
  2084. di->lease_gen == session->s_cap_gen &&
  2085. di->lease_renew_from &&
  2086. di->lease_renew_after == 0) {
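/*
* the lease duration comes from the mds in milliseconds; convert to
* jiffies and schedule a renewal once half the lease has elapsed.
*/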
  2087. unsigned long duration =
  2088. le32_to_cpu(h->duration_ms) * HZ / 1000;
  2089. di->lease_seq = le32_to_cpu(h->seq);
  2090. dentry->d_time = di->lease_renew_from + duration;
  2091. di->lease_renew_after = di->lease_renew_from +
  2092. (duration >> 1);
  2093. di->lease_renew_from = 0;
  2094. }
  2095. break;
  2096. }
  2097. spin_unlock(&dentry->d_lock);
  2098. dput(dentry);
  2099. if (!release)
  2100. goto out;
  2101. release:
  2102. /* let's just reuse the same message */
  2103. h->action = CEPH_MDS_LEASE_REVOKE_ACK;
  2104. ceph_msg_get(msg);
  2105. ceph_con_send(&session->s_con, msg);
  2106. out:
  2107. iput(inode);
  2108. mutex_unlock(&session->s_mutex);
  2109. ceph_put_mds_session(session);
  2110. return;
  2111. bad:
  2112. pr_err("corrupt lease message\n");
  2113. ceph_msg_dump(msg);
  2114. }
  2115. void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
  2116. struct inode *inode,
  2117. struct dentry *dentry, char action,
  2118. u32 seq)
  2119. {
  2120. struct ceph_msg *msg;
  2121. struct ceph_mds_lease *lease;
  2122. int len = sizeof(*lease) + sizeof(u32);
  2123. int dnamelen = 0;
  2124. dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
  2125. inode, dentry, ceph_lease_op_name(action), session->s_mds);
  2126. dnamelen = dentry->d_name.len;
  2127. len += dnamelen;
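/* body: struct ceph_mds_lease, a 32-bit name length, then the name */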
  2128. msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL);
  2129. if (IS_ERR(msg))
  2130. return;
  2131. lease = msg->front.iov_base;
  2132. lease->action = action;
  2133. lease->mask = cpu_to_le16(CEPH_LOCK_DN);
  2134. lease->ino = cpu_to_le64(ceph_vino(inode).ino);
  2135. lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
  2136. lease->seq = cpu_to_le32(seq);
  2137. put_unaligned_le32(dnamelen, lease + 1);
  2138. memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);
  2139. /*
  2140. * if this is a preemptive lease RELEASE, no need to
  2141. * flush request stream, since the actual request will
  2142. * soon follow.
  2143. */
  2144. msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
  2145. ceph_con_send(&session->s_con, msg);
  2146. }
  2147. /*
  2148. * Preemptively release a lease we expect to invalidate anyway.
  2149. * Pass @inode always, @dentry is optional.
  2150. */
  2151. void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
  2152. struct dentry *dentry, int mask)
  2153. {
  2154. struct ceph_dentry_info *di;
  2155. struct ceph_mds_session *session;
  2156. u32 seq;
  2157. BUG_ON(inode == NULL);
  2158. BUG_ON(dentry == NULL);
  2159. BUG_ON(mask != CEPH_LOCK_DN);
  2160. /* is dentry lease valid? */
  2161. spin_lock(&dentry->d_lock);
  2162. di = ceph_dentry(dentry);
  2163. if (!di || !di->lease_session ||
  2164. di->lease_session->s_mds < 0 ||
  2165. di->lease_gen != di->lease_session->s_cap_gen ||
  2166. !time_before(jiffies, dentry->d_time)) {
  2167. dout("lease_release inode %p dentry %p -- "
  2168. "no lease on %d\n",
  2169. inode, dentry, mask);
  2170. spin_unlock(&dentry->d_lock);
  2171. return;
  2172. }
  2173. /* we do have a lease on this dentry; note mds and seq */
  2174. session = ceph_get_mds_session(di->lease_session);
  2175. seq = di->lease_seq;
  2176. __ceph_mdsc_drop_dentry_lease(dentry);
  2177. spin_unlock(&dentry->d_lock);
  2178. dout("lease_release inode %p dentry %p mask %d to mds%d\n",
  2179. inode, dentry, mask, session->s_mds);
  2180. ceph_mdsc_lease_send_msg(session, inode, dentry,
  2181. CEPH_MDS_LEASE_RELEASE, seq);
  2182. ceph_put_mds_session(session);
  2183. }
  2184. /*
  2185. * drop all leases (and dentry refs) in preparation for umount
  2186. */
  2187. static void drop_leases(struct ceph_mds_client *mdsc)
  2188. {
  2189. int i;
  2190. dout("drop_leases\n");
  2191. mutex_lock(&mdsc->mutex);
  2192. for (i = 0; i < mdsc->max_sessions; i++) {
  2193. struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
  2194. if (!s)
  2195. continue;
  2196. mutex_unlock(&mdsc->mutex);
  2197. mutex_lock(&s->s_mutex);
  2198. mutex_unlock(&s->s_mutex);
  2199. ceph_put_mds_session(s);
  2200. mutex_lock(&mdsc->mutex);
  2201. }
  2202. mutex_unlock(&mdsc->mutex);
  2203. }
  2204. /*
  2205. * delayed work -- periodically trim expired leases, renew caps with mds
  2206. */
  2207. static void schedule_delayed(struct ceph_mds_client *mdsc)
  2208. {
  2209. int delay = 5;
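/*
* fire roughly every 5 seconds; round_jiffies_relative() aligns the
* expiry to a second boundary so timer wakeups can be batched.
*/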
  2210. unsigned hz = round_jiffies_relative(HZ * delay);
  2211. schedule_delayed_work(&mdsc->delayed_work, hz);
  2212. }
  2213. static void delayed_work(struct work_struct *work)
  2214. {
  2215. int i;
  2216. struct ceph_mds_client *mdsc =
  2217. container_of(work, struct ceph_mds_client, delayed_work.work);
  2218. int renew_interval;
  2219. int renew_caps;
  2220. dout("mdsc delayed_work\n");
  2221. ceph_check_delayed_caps(mdsc);
  2222. mutex_lock(&mdsc->mutex);
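/*
* renew caps after a quarter of the mds session timeout, so they are
* refreshed well before the session could go stale.
*/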
  2223. renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
  2224. renew_caps = time_after_eq(jiffies, HZ*renew_interval +
  2225. mdsc->last_renew_caps);
  2226. if (renew_caps)
  2227. mdsc->last_renew_caps = jiffies;
  2228. for (i = 0; i < mdsc->max_sessions; i++) {
  2229. struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
  2230. if (s == NULL)
  2231. continue;
  2232. if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
  2233. dout("resending session close request for mds%d\n",
  2234. s->s_mds);
  2235. request_close_session(mdsc, s);
  2236. ceph_put_mds_session(s);
  2237. continue;
  2238. }
  2239. if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
  2240. if (s->s_state == CEPH_MDS_SESSION_OPEN) {
  2241. s->s_state = CEPH_MDS_SESSION_HUNG;
  2242. pr_info("mds%d hung\n", s->s_mds);
  2243. }
  2244. }
  2245. if (s->s_state < CEPH_MDS_SESSION_OPEN) {
  2246. /* this mds is failed or recovering, just wait */
  2247. ceph_put_mds_session(s);
  2248. continue;
  2249. }
  2250. mutex_unlock(&mdsc->mutex);
  2251. mutex_lock(&s->s_mutex);
  2252. if (renew_caps)
  2253. send_renew_caps(mdsc, s);
  2254. else
  2255. ceph_con_keepalive(&s->s_con);
  2256. add_cap_releases(mdsc, s, -1);
  2257. send_cap_releases(mdsc, s);
  2258. mutex_unlock(&s->s_mutex);
  2259. ceph_put_mds_session(s);
  2260. mutex_lock(&mdsc->mutex);
  2261. }
  2262. mutex_unlock(&mdsc->mutex);
  2263. schedule_delayed(mdsc);
  2264. }
  2265. int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
  2266. {
  2267. mdsc->client = client;
  2268. mutex_init(&mdsc->mutex);
mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
if (mdsc->mdsmap == NULL)
return -ENOMEM;
  2270. init_completion(&mdsc->safe_umount_waiters);
  2271. init_completion(&mdsc->session_close_waiters);
  2272. INIT_LIST_HEAD(&mdsc->waiting_for_map);
  2273. mdsc->sessions = NULL;
  2274. mdsc->max_sessions = 0;
  2275. mdsc->stopping = 0;
  2276. init_rwsem(&mdsc->snap_rwsem);
  2277. INIT_RADIX_TREE(&mdsc->snap_realms, GFP_NOFS);
  2278. INIT_LIST_HEAD(&mdsc->snap_empty);
  2279. spin_lock_init(&mdsc->snap_empty_lock);
  2280. mdsc->last_tid = 0;
  2281. INIT_RADIX_TREE(&mdsc->request_tree, GFP_NOFS);
  2282. INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
  2283. mdsc->last_renew_caps = jiffies;
  2284. INIT_LIST_HEAD(&mdsc->cap_delay_list);
  2285. spin_lock_init(&mdsc->cap_delay_lock);
  2286. INIT_LIST_HEAD(&mdsc->snap_flush_list);
  2287. spin_lock_init(&mdsc->snap_flush_lock);
  2288. mdsc->cap_flush_seq = 0;
  2289. INIT_LIST_HEAD(&mdsc->cap_dirty);
  2290. mdsc->num_cap_flushing = 0;
  2291. spin_lock_init(&mdsc->cap_dirty_lock);
  2292. init_waitqueue_head(&mdsc->cap_flushing_wq);
  2293. spin_lock_init(&mdsc->dentry_lru_lock);
  2294. INIT_LIST_HEAD(&mdsc->dentry_lru);
  2295. return 0;
  2296. }
  2297. /*
  2298. * Wait for safe replies on open mds requests. If we time out, drop
  2299. * all requests from the tree to avoid dangling dentry refs.
  2300. */
  2301. static void wait_requests(struct ceph_mds_client *mdsc)
  2302. {
  2303. struct ceph_mds_request *req;
  2304. struct ceph_client *client = mdsc->client;
  2305. mutex_lock(&mdsc->mutex);
  2306. if (__get_oldest_tid(mdsc)) {
  2307. mutex_unlock(&mdsc->mutex);
  2308. dout("wait_requests waiting for requests\n");
  2309. wait_for_completion_timeout(&mdsc->safe_umount_waiters,
  2310. client->mount_args->mount_timeout * HZ);
  2311. mutex_lock(&mdsc->mutex);
  2312. /* tear down remaining requests */
  2313. while (radix_tree_gang_lookup(&mdsc->request_tree,
  2314. (void **)&req, 0, 1)) {
  2315. dout("wait_requests timed out on tid %llu\n",
  2316. req->r_tid);
  2317. radix_tree_delete(&mdsc->request_tree, req->r_tid);
  2318. ceph_mdsc_put_request(req);
  2319. }
  2320. }
  2321. mutex_unlock(&mdsc->mutex);
  2322. dout("wait_requests done\n");
  2323. }
  2324. /*
  2325. * called before mount is ro, and before dentries are torn down.
  2326. * (hmm, does this still race with new lookups?)
  2327. */
  2328. void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
  2329. {
  2330. dout("pre_umount\n");
  2331. mdsc->stopping = 1;
  2332. drop_leases(mdsc);
  2333. ceph_flush_dirty_caps(mdsc);
  2334. wait_requests(mdsc);
  2335. }
  2336. /*
  2337. * wait for all write mds requests to flush.
  2338. */
  2339. static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
  2340. {
  2341. struct ceph_mds_request *req;
  2342. u64 next_tid = 0;
  2343. int got;
  2344. mutex_lock(&mdsc->mutex);
  2345. dout("wait_unsafe_requests want %lld\n", want_tid);
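/*
* scan the request tree in tid order; only ops with the
* CEPH_MDS_OP_WRITE bit set get unsafe replies, so read-only
* requests are skipped.
*/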
  2346. while (1) {
  2347. got = radix_tree_gang_lookup(&mdsc->request_tree, (void **)&req,
  2348. next_tid, 1);
  2349. if (!got)
  2350. break;
  2351. if (req->r_tid > want_tid)
  2352. break;
  2353. next_tid = req->r_tid + 1;
  2354. if ((req->r_op & CEPH_MDS_OP_WRITE) == 0)
  2355. continue; /* not a write op */
  2356. ceph_mdsc_get_request(req);
  2357. mutex_unlock(&mdsc->mutex);
  2358. dout("wait_unsafe_requests wait on %llu (want %llu)\n",
  2359. req->r_tid, want_tid);
  2360. wait_for_completion(&req->r_safe_completion);
  2361. mutex_lock(&mdsc->mutex);
  2362. ceph_mdsc_put_request(req);
  2363. }
  2364. mutex_unlock(&mdsc->mutex);
  2365. dout("wait_unsafe_requests done\n");
  2366. }
  2367. void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
  2368. {
  2369. u64 want_tid, want_flush;
  2370. dout("sync\n");
  2371. mutex_lock(&mdsc->mutex);
  2372. want_tid = mdsc->last_tid;
  2373. want_flush = mdsc->cap_flush_seq;
  2374. mutex_unlock(&mdsc->mutex);
  2375. dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
  2376. ceph_flush_dirty_caps(mdsc);
  2377. wait_unsafe_requests(mdsc, want_tid);
  2378. wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
  2379. }
  2380. /*
  2381. * called after sb is ro.
  2382. */
  2383. void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
  2384. {
  2385. struct ceph_mds_session *session;
  2386. int i;
  2387. int n;
  2388. struct ceph_client *client = mdsc->client;
  2389. unsigned long started, timeout = client->mount_args->mount_timeout * HZ;
  2390. dout("close_sessions\n");
  2391. mutex_lock(&mdsc->mutex);
  2392. /* close sessions */
  2393. started = jiffies;
  2394. while (time_before(jiffies, started + timeout)) {
  2395. dout("closing sessions\n");
  2396. n = 0;
  2397. for (i = 0; i < mdsc->max_sessions; i++) {
  2398. session = __ceph_lookup_mds_session(mdsc, i);
  2399. if (!session)
  2400. continue;
  2401. mutex_unlock(&mdsc->mutex);
  2402. mutex_lock(&session->s_mutex);
  2403. __close_session(mdsc, session);
  2404. mutex_unlock(&session->s_mutex);
  2405. ceph_put_mds_session(session);
  2406. mutex_lock(&mdsc->mutex);
  2407. n++;
  2408. }
  2409. if (n == 0)
  2410. break;
  2411. if (client->mount_state == CEPH_MOUNT_SHUTDOWN)
  2412. break;
  2413. dout("waiting for sessions to close\n");
  2414. mutex_unlock(&mdsc->mutex);
  2415. wait_for_completion_timeout(&mdsc->session_close_waiters,
  2416. timeout);
  2417. mutex_lock(&mdsc->mutex);
  2418. }
  2419. /* tear down remaining sessions */
  2420. for (i = 0; i < mdsc->max_sessions; i++) {
  2421. if (mdsc->sessions[i]) {
  2422. session = get_session(mdsc->sessions[i]);
  2423. unregister_session(mdsc, session);
  2424. mutex_unlock(&mdsc->mutex);
  2425. mutex_lock(&session->s_mutex);
  2426. remove_session_caps(session);
  2427. mutex_unlock(&session->s_mutex);
  2428. ceph_put_mds_session(session);
  2429. mutex_lock(&mdsc->mutex);
  2430. }
  2431. }
  2432. WARN_ON(!list_empty(&mdsc->cap_delay_list));
  2433. mutex_unlock(&mdsc->mutex);
  2434. ceph_cleanup_empty_realms(mdsc);
  2435. cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
  2436. dout("stopped\n");
  2437. }
  2438. void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
  2439. {
  2440. dout("stop\n");
  2441. cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
  2442. if (mdsc->mdsmap)
  2443. ceph_mdsmap_destroy(mdsc->mdsmap);
  2444. kfree(mdsc->sessions);
  2445. }
  2446. /*
  2447. * handle mds map update.
  2448. */
  2449. void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
  2450. {
  2451. u32 epoch;
  2452. u32 maplen;
  2453. void *p = msg->front.iov_base;
  2454. void *end = p + msg->front.iov_len;
  2455. struct ceph_mdsmap *newmap, *oldmap;
  2456. struct ceph_fsid fsid;
  2457. int err = -EINVAL;
  2458. ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
  2459. ceph_decode_copy(&p, &fsid, sizeof(fsid));
  2460. if (ceph_check_fsid(mdsc->client, &fsid) < 0)
  2461. return;
  2462. epoch = ceph_decode_32(&p);
  2463. maplen = ceph_decode_32(&p);
  2464. dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
  2465. /* do we need it? */
  2466. ceph_monc_got_mdsmap(&mdsc->client->monc, epoch);
  2467. mutex_lock(&mdsc->mutex);
  2468. if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
  2469. dout("handle_map epoch %u <= our %u\n",
  2470. epoch, mdsc->mdsmap->m_epoch);
  2471. mutex_unlock(&mdsc->mutex);
  2472. return;
  2473. }
  2474. newmap = ceph_mdsmap_decode(&p, end);
  2475. if (IS_ERR(newmap)) {
  2476. err = PTR_ERR(newmap);
  2477. goto bad_unlock;
  2478. }
  2479. /* swap into place */
  2480. if (mdsc->mdsmap) {
  2481. oldmap = mdsc->mdsmap;
  2482. mdsc->mdsmap = newmap;
  2483. check_new_map(mdsc, newmap, oldmap);
  2484. ceph_mdsmap_destroy(oldmap);
  2485. } else {
  2486. mdsc->mdsmap = newmap; /* first mds map */
  2487. }
  2488. mdsc->client->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
  2489. __wake_requests(mdsc, &mdsc->waiting_for_map);
  2490. mutex_unlock(&mdsc->mutex);
  2491. schedule_delayed(mdsc);
  2492. return;
  2493. bad_unlock:
  2494. mutex_unlock(&mdsc->mutex);
  2495. bad:
  2496. pr_err("error decoding mdsmap %d\n", err);
  2497. return;
  2498. }
  2499. static struct ceph_connection *con_get(struct ceph_connection *con)
  2500. {
  2501. struct ceph_mds_session *s = con->private;
  2502. if (get_session(s)) {
  2503. dout("mdsc con_get %p %d -> %d\n", s,
  2504. atomic_read(&s->s_ref) - 1, atomic_read(&s->s_ref));
  2505. return con;
  2506. }
  2507. dout("mdsc con_get %p FAIL\n", s);
  2508. return NULL;
  2509. }
  2510. static void con_put(struct ceph_connection *con)
  2511. {
  2512. struct ceph_mds_session *s = con->private;
  2513. dout("mdsc con_put %p %d -> %d\n", s, atomic_read(&s->s_ref),
  2514. atomic_read(&s->s_ref) - 1);
  2515. ceph_put_mds_session(s);
  2516. }
  2517. /*
  2518. * if the client is unresponsive for long enough, the mds will kill
  2519. * the session entirely.
  2520. */
  2521. static void peer_reset(struct ceph_connection *con)
  2522. {
  2523. struct ceph_mds_session *s = con->private;
  2524. pr_err("mds%d gave us the boot. IMPLEMENT RECONNECT.\n",
  2525. s->s_mds);
  2526. }
  2527. static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
  2528. {
  2529. struct ceph_mds_session *s = con->private;
  2530. struct ceph_mds_client *mdsc = s->s_mdsc;
  2531. int type = le16_to_cpu(msg->hdr.type);
  2532. switch (type) {
  2533. case CEPH_MSG_MDS_MAP:
  2534. ceph_mdsc_handle_map(mdsc, msg);
  2535. break;
  2536. case CEPH_MSG_CLIENT_SESSION:
  2537. handle_session(s, msg);
  2538. break;
  2539. case CEPH_MSG_CLIENT_REPLY:
  2540. handle_reply(s, msg);
  2541. break;
  2542. case CEPH_MSG_CLIENT_REQUEST_FORWARD:
  2543. handle_forward(mdsc, msg);
  2544. break;
  2545. case CEPH_MSG_CLIENT_CAPS:
  2546. ceph_handle_caps(s, msg);
  2547. break;
  2548. case CEPH_MSG_CLIENT_SNAP:
  2549. ceph_handle_snap(mdsc, msg);
  2550. break;
  2551. case CEPH_MSG_CLIENT_LEASE:
  2552. handle_lease(mdsc, msg);
  2553. break;
  2554. default:
  2555. pr_err("received unknown message type %d %s\n", type,
  2556. ceph_msg_type_name(type));
  2557. }
  2558. ceph_msg_put(msg);
  2559. }
  2560. /*
  2561. * authentication
  2562. */
  2563. static int get_authorizer(struct ceph_connection *con,
  2564. void **buf, int *len, int *proto,
  2565. void **reply_buf, int *reply_len, int force_new)
  2566. {
  2567. struct ceph_mds_session *s = con->private;
  2568. struct ceph_mds_client *mdsc = s->s_mdsc;
  2569. struct ceph_auth_client *ac = mdsc->client->monc.auth;
  2570. int ret = 0;
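/*
* build the per-session authorizer lazily (or rebuild it when the
* messenger asks for a fresh one via force_new) and hand back
* pointers to its buffers; it is cached on the session for reuse.
*/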
  2571. if (force_new && s->s_authorizer) {
  2572. ac->ops->destroy_authorizer(ac, s->s_authorizer);
  2573. s->s_authorizer = NULL;
  2574. }
  2575. if (s->s_authorizer == NULL) {
  2576. if (ac->ops->create_authorizer) {
  2577. ret = ac->ops->create_authorizer(
  2578. ac, CEPH_ENTITY_TYPE_MDS,
  2579. &s->s_authorizer,
  2580. &s->s_authorizer_buf,
  2581. &s->s_authorizer_buf_len,
  2582. &s->s_authorizer_reply_buf,
  2583. &s->s_authorizer_reply_buf_len);
  2584. if (ret)
  2585. return ret;
  2586. }
  2587. }
  2588. *proto = ac->protocol;
  2589. *buf = s->s_authorizer_buf;
  2590. *len = s->s_authorizer_buf_len;
  2591. *reply_buf = s->s_authorizer_reply_buf;
  2592. *reply_len = s->s_authorizer_reply_buf_len;
  2593. return 0;
  2594. }
  2595. static int verify_authorizer_reply(struct ceph_connection *con, int len)
  2596. {
  2597. struct ceph_mds_session *s = con->private;
  2598. struct ceph_mds_client *mdsc = s->s_mdsc;
  2599. struct ceph_auth_client *ac = mdsc->client->monc.auth;
  2600. return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
  2601. }
static const struct ceph_connection_operations mds_con_ops = {
  2603. .get = con_get,
  2604. .put = con_put,
  2605. .dispatch = dispatch,
  2606. .get_authorizer = get_authorizer,
  2607. .verify_authorizer_reply = verify_authorizer_reply,
  2608. .peer_reset = peer_reset,
  2609. .alloc_msg = ceph_alloc_msg,
  2610. .alloc_middle = ceph_alloc_middle,
  2611. };
  2612. /* eof */