#include "ceph_debug.h"

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>

#include "mon_client.h"
#include "super.h"
#include "auth.h"
#include "decode.h"
/*
 * Interact with the Ceph monitor cluster.  Handle requests for new map
 * versions, and periodically resend as needed.  Also implement
 * statfs() and umount().
 *
 * A small cluster of Ceph "monitors" is responsible for managing critical
 * cluster configuration and state information.  An odd number (e.g., 3, 5)
 * of cmon daemons use a modified version of the Paxos part-time parliament
 * algorithm to manage the MDS map (mds cluster membership), OSD map, and
 * list of clients who have mounted the file system.
 *
 * We maintain an open, active session with a monitor at all times in order
 * to receive timely MDSMap updates.  We periodically send a keepalive byte
 * on the TCP socket to ensure we detect a failure.  If the connection does
 * break, we randomly hunt for a new monitor.  Once the connection is
 * reestablished, we resend any outstanding requests.
 */
static const struct ceph_connection_operations mon_con_ops;

static int __validate_auth(struct ceph_mon_client *monc);
/*
 * Decode a monmap blob (e.g., during mount).
 */
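/*
 * On-wire layout (as decoded below): a u32 payload length, then a u16
 * encoding version, the cluster fsid, a u32 epoch, a u32 monitor count,
 * and finally that many ceph_entity_inst entries (name + address).
 */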
struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
{
	struct ceph_monmap *m = NULL;
	int i, err = -EINVAL;
	struct ceph_fsid fsid;
	u32 epoch, num_mon;
	u16 version;
	u32 len;

	ceph_decode_32_safe(&p, end, len, bad);
	ceph_decode_need(&p, end, len, bad);

	dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));

	ceph_decode_16_safe(&p, end, version, bad);

	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(&p);

	num_mon = ceph_decode_32(&p);
	ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);

	if (num_mon >= CEPH_MAX_MON)
		goto bad;
	m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
	if (m == NULL)
		return ERR_PTR(-ENOMEM);
	m->fsid = fsid;
	m->epoch = epoch;
	m->num_mon = num_mon;
	ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));
	for (i = 0; i < num_mon; i++)
		ceph_decode_addr(&m->mon_inst[i].addr);

	dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
	     m->num_mon);
	for (i = 0; i < m->num_mon; i++)
		dout("monmap_decode mon%d is %s\n", i,
		     pr_addr(&m->mon_inst[i].addr.in_addr));
	return m;

bad:
	dout("monmap_decode failed with %d\n", err);
	kfree(m);
	return ERR_PTR(err);
}
/*
 * return true if *addr is included in the monmap.
 */
int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
{
	int i;

	for (i = 0; i < m->num_mon; i++)
		if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
			return 1;
	return 0;
}
/*
 * Send an auth request.
 */
static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
{
	monc->pending_auth = 1;
	monc->m_auth->front.iov_len = len;
	monc->m_auth->hdr.front_len = cpu_to_le32(len);
	ceph_msg_get(monc->m_auth);  /* keep our ref */
	ceph_con_send(monc->con, monc->m_auth);
}
/*
 * Close monitor session, if any.
 */
static void __close_session(struct ceph_mon_client *monc)
{
	if (monc->con) {
		dout("__close_session closing mon%d\n", monc->cur_mon);
		ceph_con_revoke(monc->con, monc->m_auth);
		ceph_con_close(monc->con);
		monc->cur_mon = -1;
		monc->pending_auth = 0;
		ceph_auth_reset(monc->auth);
	}
}
/*
 * Open a session with a (new) monitor.
 */
static int __open_session(struct ceph_mon_client *monc)
{
	u8 r;
	int ret;

	if (monc->cur_mon < 0) {
		get_random_bytes(&r, 1);
		monc->cur_mon = r % monc->monmap->num_mon;
		dout("open_session num=%d r=%d -> mon%d\n",
		     monc->monmap->num_mon, r, monc->cur_mon);
		monc->sub_sent = 0;
		monc->sub_renew_after = jiffies;  /* i.e., expired */
		monc->want_next_osdmap = !!monc->want_next_osdmap;

		dout("open_session mon%d opening\n", monc->cur_mon);
		monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON;
		monc->con->peer_name.num = cpu_to_le64(monc->cur_mon);
		ceph_con_open(monc->con,
			      &monc->monmap->mon_inst[monc->cur_mon].addr);

		/* initiate authentication handshake */
		ret = ceph_auth_build_hello(monc->auth,
					    monc->m_auth->front.iov_base,
					    monc->m_auth->front_max);
		__send_prepared_auth_request(monc, ret);
	} else {
		dout("open_session mon%d already open\n", monc->cur_mon);
	}
	return 0;
}
static bool __sub_expired(struct ceph_mon_client *monc)
{
	return time_after_eq(jiffies, monc->sub_renew_after);
}
/*
 * Reschedule delayed work timer.
 */
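/*
 * Poll more aggressively (every 10s) while we have no open session or the
 * subscription has expired; otherwise a 20s period is enough to keep the
 * connection alive and the subscription renewed.
 */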
static void __schedule_delayed(struct ceph_mon_client *monc)
{
	unsigned delay;

	if (monc->cur_mon < 0 || __sub_expired(monc))
		delay = 10 * HZ;
	else
		delay = 20 * HZ;
	dout("__schedule_delayed after %u\n", delay);
	schedule_delayed_work(&monc->delayed_work, delay);
}
/*
 * Send subscribe request for mdsmap and/or osdmap.
 */
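/*
 * The request body is a u32 count of subscription items followed by that
 * many (string name, ceph_mon_subscribe_item) pairs; 'have' is the map
 * epoch we already hold and 'onetime' asks for a single update rather
 * than a standing subscription.
 */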
static void __send_subscribe(struct ceph_mon_client *monc)
{
	dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
	     (unsigned)monc->sub_sent, __sub_expired(monc),
	     monc->want_next_osdmap);
	if ((__sub_expired(monc) && !monc->sub_sent) ||
	    monc->want_next_osdmap == 1) {
		struct ceph_msg *msg;
		struct ceph_mon_subscribe_item *i;
		void *p, *end;

		msg = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, 0, 0, NULL);
		if (!msg)
			return;

		p = msg->front.iov_base;
		end = p + msg->front.iov_len;

		dout("__send_subscribe to 'mdsmap' %u+\n",
		     (unsigned)monc->have_mdsmap);
		if (monc->want_next_osdmap) {
			dout("__send_subscribe to 'osdmap' %u\n",
			     (unsigned)monc->have_osdmap);
			ceph_encode_32(&p, 3);
			ceph_encode_string(&p, end, "osdmap", 6);
			i = p;
			i->have = cpu_to_le64(monc->have_osdmap);
			i->onetime = 1;
			p += sizeof(*i);
			monc->want_next_osdmap = 2;  /* requested */
		} else {
			ceph_encode_32(&p, 2);
		}
		ceph_encode_string(&p, end, "mdsmap", 6);
		i = p;
		i->have = cpu_to_le64(monc->have_mdsmap);
		i->onetime = 0;
		p += sizeof(*i);
		ceph_encode_string(&p, end, "monmap", 6);
		i = p;
		i->have = 0;
		i->onetime = 0;
		p += sizeof(*i);

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		ceph_con_send(monc->con, msg);

		monc->sub_sent = jiffies | 1;  /* never 0 */
	}
}
static void handle_subscribe_ack(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	unsigned seconds;
	struct ceph_mon_subscribe_ack *h = msg->front.iov_base;

	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	seconds = le32_to_cpu(h->duration);

	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		pr_info("mon%d %s session established\n",
			monc->cur_mon, pr_addr(&monc->con->peer_addr.in_addr));
		monc->hunting = false;
	}
	dout("handle_subscribe_ack after %d seconds\n", seconds);
	monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1;
	monc->sub_sent = 0;
	mutex_unlock(&monc->mutex);
	return;
bad:
	pr_err("got corrupt subscribe-ack msg\n");
	ceph_msg_dump(msg);
}
/*
 * Keep track of which maps we have
 */
int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got)
{
	mutex_lock(&monc->mutex);
	monc->have_mdsmap = got;
	mutex_unlock(&monc->mutex);
	return 0;
}

int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got)
{
	mutex_lock(&monc->mutex);
	monc->have_osdmap = got;
	monc->want_next_osdmap = 0;
	mutex_unlock(&monc->mutex);
	return 0;
}
/*
 * Register interest in the next osdmap
 */
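/*
 * want_next_osdmap is a small state machine: 0 = not wanted, 1 = wanted
 * but not yet requested, 2 = a onetime subscription has been sent and we
 * are waiting for the map to arrive.
 */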
void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
{
	dout("request_next_osdmap have %u\n", monc->have_osdmap);
	mutex_lock(&monc->mutex);
	if (!monc->want_next_osdmap)
		monc->want_next_osdmap = 1;
	if (monc->want_next_osdmap < 2)
		__send_subscribe(monc);
	mutex_unlock(&monc->mutex);
}
/*
 * Open a session with the monitor cluster, allocating the connection on
 * first use.
 */
int ceph_monc_open_session(struct ceph_mon_client *monc)
{
	if (!monc->con) {
		monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
		if (!monc->con)
			return -ENOMEM;
		ceph_con_init(monc->client->msgr, monc->con);
		monc->con->private = monc;
		monc->con->ops = &mon_con_ops;
	}

	mutex_lock(&monc->mutex);
	__open_session(monc);
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
	return 0;
}
/*
 * The monitor responds with a mount ack to indicate mount success.  The
 * included client ticket allows the client to talk to MDSs and OSDs.
 */
static void ceph_monc_handle_map(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	struct ceph_client *client = monc->client;
	struct ceph_monmap *monmap = NULL, *old = monc->monmap;
	void *p, *end;

	mutex_lock(&monc->mutex);

	dout("handle_monmap\n");
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	monmap = ceph_monmap_decode(p, end);
	if (IS_ERR(monmap)) {
		pr_err("problem decoding monmap, %d\n",
		       (int)PTR_ERR(monmap));
		goto out;
	}

	if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
		kfree(monmap);
		goto out;
	}

	client->monc.monmap = monmap;
	kfree(old);

out:
	mutex_unlock(&monc->mutex);
	wake_up(&client->auth_wq);
}
/*
 * statfs
 */
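/*
 * In-flight statfs requests are kept in an rbtree indexed by transaction
 * id so that replies can be matched to their waiters and any outstanding
 * requests can be resent after a monitor connection reset.
 */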
static struct ceph_mon_statfs_request *__lookup_statfs(
	struct ceph_mon_client *monc, u64 tid)
{
	struct ceph_mon_statfs_request *req;
	struct rb_node *n = monc->statfs_request_tree.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_mon_statfs_request, node);
		if (tid < req->tid)
			n = n->rb_left;
		else if (tid > req->tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

static void __insert_statfs(struct ceph_mon_client *monc,
			    struct ceph_mon_statfs_request *new)
{
	struct rb_node **p = &monc->statfs_request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mon_statfs_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_mon_statfs_request, node);
		if (new->tid < req->tid)
			p = &(*p)->rb_left;
		else if (new->tid > req->tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &monc->statfs_request_tree);
}
static void handle_statfs_reply(struct ceph_mon_client *monc,
				struct ceph_msg *msg)
{
	struct ceph_mon_statfs_request *req;
	struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
	u64 tid;

	if (msg->front.iov_len != sizeof(*reply))
		goto bad;
	tid = le64_to_cpu(msg->hdr.tid);
	dout("handle_statfs_reply %p tid %llu\n", msg, tid);

	mutex_lock(&monc->mutex);
	req = __lookup_statfs(monc, tid);
	if (req) {
		*req->buf = reply->st;
		req->result = 0;
	}
	mutex_unlock(&monc->mutex);
	if (req)
		complete(&req->completion);
	return;

bad:
	pr_err("corrupt statfs reply, no tid\n");
	ceph_msg_dump(msg);
}
/*
 * (re)send a statfs request
 */
static int send_statfs(struct ceph_mon_client *monc,
		       struct ceph_mon_statfs_request *req)
{
	struct ceph_msg *msg;
	struct ceph_mon_statfs *h;

	dout("send_statfs tid %llu\n", req->tid);
	msg = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), 0, 0, NULL);
	if (IS_ERR(msg))
		return PTR_ERR(msg);
	req->request = msg;
	msg->hdr.tid = cpu_to_le64(req->tid);
	h = msg->front.iov_base;
	h->monhdr.have_version = 0;
	h->monhdr.session_mon = cpu_to_le16(-1);
	h->monhdr.session_mon_tid = 0;
	h->fsid = monc->monmap->fsid;
	ceph_con_send(monc->con, msg);
	return 0;
}
/*
 * Do a synchronous statfs().
 */
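/*
 * Reserve a reply message from the pool, register the request (keyed by a
 * fresh tid) in the request tree, send it, and sleep until the reply
 * handler completes us or the wait is interrupted.  The on-stack request
 * is unregistered again before returning.
 */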
int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
{
	struct ceph_mon_statfs_request req;
	int err;

	req.buf = buf;
	init_completion(&req.completion);

	/* allocate memory for reply */
	err = ceph_msgpool_resv(&monc->msgpool_statfs_reply, 1);
	if (err)
		return err;

	/* register request */
	mutex_lock(&monc->mutex);
	req.tid = ++monc->last_tid;
	req.last_attempt = jiffies;
	req.delay = BASE_DELAY_INTERVAL;
	__insert_statfs(monc, &req);
	monc->num_statfs_requests++;
	mutex_unlock(&monc->mutex);

	/* send request and wait */
	err = send_statfs(monc, &req);
	if (!err)
		err = wait_for_completion_interruptible(&req.completion);

	mutex_lock(&monc->mutex);
	rb_erase(&req.node, &monc->statfs_request_tree);
	monc->num_statfs_requests--;
	ceph_msgpool_resv(&monc->msgpool_statfs_reply, -1);
	mutex_unlock(&monc->mutex);

	if (!err)
		err = req.result;
	return err;
}
/*
 * Resend pending statfs requests.
 */
static void __resend_statfs(struct ceph_mon_client *monc)
{
	struct ceph_mon_statfs_request *req;
	struct rb_node *p;

	for (p = rb_first(&monc->statfs_request_tree); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_mon_statfs_request, node);
		send_statfs(monc, req);
	}
}
/*
 * Delayed work.  If we haven't mounted yet, retry.  Otherwise,
 * renew/retry subscription as needed (in case it is timing out, or we
 * got an ENOMEM).  And keep the monitor connection alive.
 */
static void delayed_work(struct work_struct *work)
{
	struct ceph_mon_client *monc =
		container_of(work, struct ceph_mon_client, delayed_work.work);

	dout("monc delayed_work\n");
	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		__close_session(monc);
		__open_session(monc);  /* continue hunting */
	} else {
		ceph_con_keepalive(monc->con);

		__validate_auth(monc);

		if (monc->auth->ops->is_authenticated(monc->auth))
			__send_subscribe(monc);
	}
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
}
/*
 * On startup, we build a temporary monmap populated with the IPs
 * provided by mount(2).
 */
static int build_initial_monmap(struct ceph_mon_client *monc)
{
	struct ceph_mount_args *args = monc->client->mount_args;
	struct ceph_entity_addr *mon_addr = args->mon_addr;
	int num_mon = args->num_mon;
	int i;

	/* build initial monmap */
	monc->monmap = kzalloc(sizeof(*monc->monmap) +
			       num_mon*sizeof(monc->monmap->mon_inst[0]),
			       GFP_KERNEL);
	if (!monc->monmap)
		return -ENOMEM;
	for (i = 0; i < num_mon; i++) {
		monc->monmap->mon_inst[i].addr = mon_addr[i];
		monc->monmap->mon_inst[i].addr.nonce = 0;
		monc->monmap->mon_inst[i].name.type =
			CEPH_ENTITY_TYPE_MON;
		monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
	}
	monc->monmap->num_mon = num_mon;
	monc->have_fsid = false;

	/* release addr memory */
	kfree(args->mon_addr);
	args->mon_addr = NULL;
	args->num_mon = 0;
	return 0;
}
int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
	int err = 0;

	dout("init\n");
	memset(monc, 0, sizeof(*monc));
	monc->client = cl;
	monc->monmap = NULL;
	mutex_init(&monc->mutex);

	err = build_initial_monmap(monc);
	if (err)
		goto out;

	monc->con = NULL;

	/* authentication */
	monc->auth = ceph_auth_init(cl->mount_args->name,
				    cl->mount_args->secret);
	if (IS_ERR(monc->auth)) {
		err = PTR_ERR(monc->auth);
		goto out_monmap;
	}
	monc->auth->want_keys =
		CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
		CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;
	/* msg pools */
	err = ceph_msgpool_init(&monc->msgpool_subscribe_ack,
				sizeof(struct ceph_mon_subscribe_ack), 1, false);
	if (err < 0)
		goto out_monmap;
	err = ceph_msgpool_init(&monc->msgpool_statfs_reply,
				sizeof(struct ceph_mon_statfs_reply), 0, false);
	if (err < 0)
		goto out_pool1;
	err = ceph_msgpool_init(&monc->msgpool_auth_reply, 4096, 1, false);
	if (err < 0)
		goto out_pool2;

	monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, 0, 0, NULL);
	monc->pending_auth = 0;
	if (IS_ERR(monc->m_auth)) {
		err = PTR_ERR(monc->m_auth);
		monc->m_auth = NULL;
		goto out_pool3;
	}

	monc->cur_mon = -1;
	monc->hunting = true;
	monc->sub_renew_after = jiffies;
	monc->sub_sent = 0;

	INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
	monc->statfs_request_tree = RB_ROOT;
	monc->num_statfs_requests = 0;
	monc->last_tid = 0;

	monc->have_mdsmap = 0;
	monc->have_osdmap = 0;
	monc->want_next_osdmap = 1;
	return 0;
out_pool3:
	ceph_msgpool_destroy(&monc->msgpool_auth_reply);
out_pool2:
	ceph_msgpool_destroy(&monc->msgpool_statfs_reply);
out_pool1:
	ceph_msgpool_destroy(&monc->msgpool_subscribe_ack);
out_monmap:
	kfree(monc->monmap);
out:
	return err;
}
void ceph_monc_stop(struct ceph_mon_client *monc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&monc->delayed_work);

	mutex_lock(&monc->mutex);
	__close_session(monc);
	if (monc->con) {
		monc->con->private = NULL;
		monc->con->ops->put(monc->con);
		monc->con = NULL;
	}
	mutex_unlock(&monc->mutex);

	ceph_auth_destroy(monc->auth);

	ceph_msg_put(monc->m_auth);

	ceph_msgpool_destroy(&monc->msgpool_subscribe_ack);
	ceph_msgpool_destroy(&monc->msgpool_statfs_reply);
	ceph_msgpool_destroy(&monc->msgpool_auth_reply);

	kfree(monc->monmap);
}
static void handle_auth_reply(struct ceph_mon_client *monc,
			      struct ceph_msg *msg)
{
	int ret;

	mutex_lock(&monc->mutex);
	monc->pending_auth = 0;
	ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
				     msg->front.iov_len,
				     monc->m_auth->front.iov_base,
				     monc->m_auth->front_max);
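	/*
	 * ret < 0: authentication failed; ret > 0: a follow-up request of
	 * that length was written into m_auth and must be sent; ret == 0:
	 * nothing more to send, check whether we are now authenticated.
	 */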
	if (ret < 0) {
		monc->client->auth_err = ret;
		wake_up(&monc->client->auth_wq);
	} else if (ret > 0) {
		__send_prepared_auth_request(monc, ret);
	} else if (monc->auth->ops->is_authenticated(monc->auth)) {
		dout("authenticated, starting session\n");

		monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
		monc->client->msgr->inst.name.num = monc->auth->global_id;

		__send_subscribe(monc);
		__resend_statfs(monc);
	}
	mutex_unlock(&monc->mutex);
}
static int __validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	if (monc->pending_auth)
		return 0;

	ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
			      monc->m_auth->front_max);
	if (ret <= 0)
		return ret;  /* either an error, or no need to authenticate */
	__send_prepared_auth_request(monc, ret);
	return 0;
}

int ceph_monc_validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	mutex_lock(&monc->mutex);
	ret = __validate_auth(monc);
	mutex_unlock(&monc->mutex);
	return ret;
}
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(msg->hdr.type);

	if (!monc)
		return;

	switch (type) {
	case CEPH_MSG_AUTH_REPLY:
		handle_auth_reply(monc, msg);
		break;

	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		handle_subscribe_ack(monc, msg);
		break;

	case CEPH_MSG_STATFS_REPLY:
		handle_statfs_reply(monc, msg);
		break;

	case CEPH_MSG_MON_MAP:
		ceph_monc_handle_map(monc, msg);
		break;

	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(&monc->client->mdsc, msg);
		break;

	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(&monc->client->osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
	ceph_msg_put(msg);
}
/*
 * Allocate memory for incoming message
 */
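/*
 * Subscribe acks, statfs replies, and auth replies are drawn from
 * preallocated message pools; map messages are allocated on demand, and
 * unrecognized types are skipped.
 */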
static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr,
				      int *skip)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	struct ceph_msg *m = NULL;

	*skip = 0;

	switch (type) {
	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		m = ceph_msgpool_get(&monc->msgpool_subscribe_ack, front_len);
		break;
	case CEPH_MSG_STATFS_REPLY:
		m = ceph_msgpool_get(&monc->msgpool_statfs_reply, front_len);
		break;
	case CEPH_MSG_AUTH_REPLY:
		m = ceph_msgpool_get(&monc->msgpool_auth_reply, front_len);
		break;
	case CEPH_MSG_MON_MAP:
	case CEPH_MSG_MDS_MAP:
	case CEPH_MSG_OSD_MAP:
		m = ceph_msg_new(type, front_len, 0, 0, NULL);
		break;
	}

	if (!m) {
		pr_info("alloc_msg unknown type %d\n", type);
		*skip = 1;
	}
	return m;
}
/*
 * If the monitor connection resets, pick a new monitor and resubmit
 * any pending requests.
 */
static void mon_fault(struct ceph_connection *con)
{
	struct ceph_mon_client *monc = con->private;

	if (!monc)
		return;

	dout("mon_fault\n");
	mutex_lock(&monc->mutex);
	if (!con->private)
		goto out;

	if (monc->con && !monc->hunting)
		pr_info("mon%d %s session lost, "
			"hunting for new mon\n", monc->cur_mon,
			pr_addr(&monc->con->peer_addr.in_addr));

	__close_session(monc);
	if (!monc->hunting) {
		/* start hunting */
		monc->hunting = true;
		__open_session(monc);
	} else {
		/* already hunting, let's wait a bit */
		__schedule_delayed(monc);
	}
out:
	mutex_unlock(&monc->mutex);
}
static const struct ceph_connection_operations mon_con_ops = {
	.get = ceph_con_get,
	.put = ceph_con_put,
	.dispatch = dispatch,
	.fault = mon_fault,
	.alloc_msg = mon_alloc_msg,
};