osdmap.c

#include <asm/div64.h>
#include "super.h"
#include "osdmap.h"
#include "crush/hash.h"
#include "crush/mapper.h"
#include "decode.h"
#include "ceph_debug.h"

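/*
 * Render an osd's state flags (exists/up) as a short human-readable
 * string in the caller-provided buffer and return that buffer.
 */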
char *ceph_osdmap_state_str(char *str, int len, int state)
{
	int flag = 0;

	if (!len)
		goto done;

	*str = '\0';
	if (state) {
		if (state & CEPH_OSD_EXISTS) {
			snprintf(str, len, "exists");
			flag = 1;
		}
		if (state & CEPH_OSD_UP) {
			int l = strlen(str);

			snprintf(str + l, len - l, "%s%s",
				 (flag ? ", " : ""), "up");
			flag = 1;
		}
	} else {
		snprintf(str, len, "doesn't exist");
	}
done:
	return str;
}

/* maps */

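/*
 * Number of bits needed to represent t (0 for t == 0).
 */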
static int calc_bits_of(unsigned t)
{
	int b = 0;

	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
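/* e.g. pg_num = 12 (0b1100) gives pg_num_mask = 15 (0b1111) */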
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
	pi->pgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
	pi->lpg_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
	pi->lpgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
}

/*
 * decode crush map
 */
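/*
 * Each helper below decodes only the per-algorithm payload of a bucket;
 * the common header (id, type, alg, weight, size, items) is decoded by
 * crush_decode() before the helper is called.
 */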
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	ceph_decode_32(p, b->item_weight);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;

	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		ceph_decode_32(p, b->item_weights[j]);
		ceph_decode_32(p, b->sum_weights[j]);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;

	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_32_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		ceph_decode_32(p, b->node_weights[j]);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;

	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		ceph_decode_32(p, b->item_weights[j]);
		ceph_decode_32(p, b->straws[j]);
	}
	return 0;
bad:
	return -EINVAL;
}
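
/*
 * Decode a full crush map from the buffer between pbyval and end into a
 * newly allocated struct crush_map.  Returns an ERR_PTR and frees any
 * partially built map on failure.
 */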
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	ceph_decode_32(p, magic);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned)magic, (unsigned)CRUSH_MAGIC);
		goto bad;
	}
	ceph_decode_32(p, c->max_buckets);
	ceph_decode_32(p, c->max_rules);
	ceph_decode_32(p, c->max_devices);

	c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
	if (c->device_parents == NULL)
		goto badmem;
	c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
	if (c->bucket_parents == NULL)
		goto badmem;
	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		default:
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		ceph_decode_32(p, b->id);
		ceph_decode_16(p, b->type);
		ceph_decode_16(p, b->alg);
		ceph_decode_32(p, b->weight);
		ceph_decode_32(p, b->size);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;
		b->perm_n = 0;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			ceph_decode_32(p, b->items[j]);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				 (struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		if (yes > ULONG_MAX / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			ceph_decode_32(p, r->steps[j].op);
			ceph_decode_32(p, r->steps[j].arg1);
			ceph_decode_32(p, r->steps[j].arg2);
		}
	}

	/* ignore trailing name maps. */

	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}

/*
 * osd map
 */
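/*
 * Free an osdmap and everything it owns: the crush map, the pg_temp
 * mappings, and the per-osd state, weight and address arrays.
 */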
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->pg_pool);
	kfree(map->osd_addr);
	kfree(map);
}

/*
 * adjust max osd value.  reallocate arrays.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	struct ceph_entity_addr *addr;
	u32 *weight;

	state = kcalloc(max, sizeof(*state), GFP_NOFS);
	addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
	weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
	if (state == NULL || addr == NULL || weight == NULL) {
		kfree(state);
		kfree(addr);
		kfree(weight);
		return -ENOMEM;
	}

	/* copy old? */
	if (map->osd_state) {
		memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
		memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
		memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
		kfree(map->osd_state);
		kfree(map->osd_addr);
		kfree(map->osd_weight);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	map->max_osd = max;
	return 0;
}

/*
 * Insert a new pg_temp mapping
 */
static void __insert_pg_mapping(struct ceph_pg_mapping *new,
				struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;

	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		if (new->pgid < pg->pgid)
			p = &(*p)->rb_left;
		else if (new->pgid > pg->pgid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
}

/*
 * decode a full map.
 */
struct ceph_osdmap *osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	u16 version;
	u32 len, max, i;
	int err = -EINVAL;
	void *start = *p;

	dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (map == NULL)
		return ERR_PTR(-ENOMEM);
	map->pg_temp = RB_ROOT;

	ceph_decode_16_safe(p, end, version, bad);

	ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	ceph_decode_32(p, map->epoch);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));
	ceph_decode_32(p, map->num_pools);

	map->pg_pool = kcalloc(map->num_pools, sizeof(*map->pg_pool),
			       GFP_NOFS);
	if (!map->pg_pool) {
		err = -ENOMEM;
		goto bad;
	}

	ceph_decode_32_safe(p, end, max, bad);
	while (max--) {
		ceph_decode_need(p, end, 4+sizeof(map->pg_pool->v), bad);
		ceph_decode_32(p, i);
		if (i >= map->num_pools)
			goto bad;
		ceph_decode_copy(p, &map->pg_pool[i].v,
				 sizeof(map->pg_pool->v));
		calc_pg_masks(&map->pg_pool[i]);
		/* skip the per-pool snaps and removed snap intervals */
		*p += le32_to_cpu(map->pg_pool[i].v.num_snaps) * sizeof(u64);
		*p += le32_to_cpu(map->pg_pool[i].v.num_removed_snap_intervals)
			* sizeof(u64) * 2;
	}

	ceph_decode_32_safe(p, end, map->flags, bad);

	ceph_decode_32(p, max);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err < 0)
		goto bad;
	dout("osdmap_decode max_osd = %d\n", map->max_osd);

	/* osds */
	err = -EINVAL;
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(1 + sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), bad);
	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_state, map->max_osd);

	*p += 4; /* skip length field (should match max) */
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_32(p, map->osd_weight[i]);

	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));

	/* pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	for (i = 0; i < len; i++) {
		int n, j;
		u64 pgid;
		struct ceph_pg_mapping *pg;

		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
		ceph_decode_64(p, pgid);
		ceph_decode_32(p, n);
		ceph_decode_need(p, end, n * sizeof(u32), bad);
		pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
		if (!pg) {
			err = -ENOMEM;
			goto bad;
		}
		pg->pgid = pgid;
		pg->len = n;
		for (j = 0; j < n; j++)
			ceph_decode_32(p, pg->osds[j]);
		__insert_pg_mapping(pg, &map->pg_temp);
		dout(" added pg_temp %llx len %d\n", pgid, len);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, bad);
	dout("osdmap_decode crush len %d from off 0x%x\n", len,
	     (int)(*p - start));
	ceph_decode_need(p, end, len, bad);
	map->crush = crush_decode(*p, end);
	*p += len;
	if (IS_ERR(map->crush)) {
		err = PTR_ERR(map->crush);
		map->crush = NULL;
		goto bad;
	}

	/* ignore the rest of the map */
	*p = end;

	dout("osdmap_decode done %p %p\n", *p, end);
	return map;

bad:
	dout("osdmap_decode fail\n");
	ceph_osdmap_destroy(map);
	return ERR_PTR(err);
}

/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map,
					     struct ceph_messenger *msgr)
{
	struct ceph_osdmap *newmap = map;
	struct crush_map *newcrush = NULL;
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	u32 len, pool;
	__s32 new_flags, max;
	void *start = *p;
	int err = -EINVAL;
	u16 version;
	struct rb_node *rbp;

	ceph_decode_16_safe(p, end, version, bad);

	ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
			 bad);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	ceph_decode_32(p, epoch);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	ceph_decode_32(p, new_flags);

	/* full map? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		newmap = osdmap_decode(p, min(*p+len, end));
		return newmap;  /* error or not */
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental new crush map len %d, %p to %p\n",
		     len, *p, end);
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush))
			return ERR_PTR(PTR_ERR(newcrush));
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;

	ceph_decode_need(p, end, 5*sizeof(u32), bad);

	/* new max? */
	ceph_decode_32(p, max);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err < 0)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}

	/* new_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		ceph_decode_32_safe(p, end, pool, bad);
		if (pool >= map->num_pools) {
			void *pg_pool = kcalloc(pool + 1,
						sizeof(*map->pg_pool),
						GFP_NOFS);
			if (!pg_pool) {
				err = -ENOMEM;
				goto bad;
			}
			memcpy(pg_pool, map->pg_pool,
			       map->num_pools * sizeof(*map->pg_pool));
			kfree(map->pg_pool);
			map->pg_pool = pg_pool;
			map->num_pools = pool+1;
		}
		ceph_decode_copy(p, &map->pg_pool[pool].v,
				 sizeof(map->pg_pool->v));
		calc_pg_masks(&map->pg_pool[pool]);
	}

	/* old_pool (ignore) */
	ceph_decode_32_safe(p, end, len, bad);
	*p += len * sizeof(u32);

	/* new_up */
	err = -EINVAL;
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		struct ceph_entity_addr addr;
		ceph_decode_32_safe(p, end, osd, bad);
		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
		pr_info("osd%d up\n", osd);
		BUG_ON(osd >= map->max_osd);
		map->osd_state[osd] |= CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	/* new_down */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		ceph_decode_32_safe(p, end, osd, bad);
		(*p)++;  /* clean flag */
		pr_info("ceph osd%d down\n", osd);
		if (osd < map->max_osd)
			map->osd_state[osd] &= ~CEPH_OSD_UP;
	}

	/* new_weight */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd, off;
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		ceph_decode_32(p, osd);
		ceph_decode_32(p, off);
		pr_info("osd%d weight 0x%x %s\n", osd, off,
			off == CEPH_OSD_IN ? "(in)" :
			(off == CEPH_OSD_OUT ? "(out)" : ""));
		if (osd < map->max_osd)
			map->osd_weight[osd] = off;
	}

	/* new_pg_temp */
	rbp = rb_first(&map->pg_temp);
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_mapping *pg;
		int j;
		u64 pgid;
		u32 pglen;
		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
		ceph_decode_64(p, pgid);
		ceph_decode_32(p, pglen);

		/* remove any? */
		while (rbp && rb_entry(rbp, struct ceph_pg_mapping,
				       node)->pgid <= pgid) {
			struct rb_node *cur = rbp;
			rbp = rb_next(rbp);
			dout(" removed pg_temp %llx\n",
			     rb_entry(cur, struct ceph_pg_mapping, node)->pgid);
			rb_erase(cur, &map->pg_temp);
		}

		if (pglen) {
			/* insert */
			ceph_decode_need(p, end, pglen*sizeof(u32), bad);
			pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
			if (!pg) {
				err = -ENOMEM;
				goto bad;
			}
			pg->pgid = pgid;
			pg->len = pglen;
			for (j = 0; j < pglen; j++)
				ceph_decode_32(p, pg->osds[j]);
			__insert_pg_mapping(pg, &map->pg_temp);
			dout(" added pg_temp %llx len %d\n", pgid, pglen);
		}
	}

	while (rbp) {
		struct rb_node *cur = rbp;
		rbp = rb_next(rbp);
		dout(" removed pg_temp %llx\n",
		     rb_entry(cur, struct ceph_pg_mapping, node)->pgid);
		rb_erase(cur, &map->pg_temp);
	}

	/* ignore the rest */
	*p = end;
	return map;

bad:
	pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
	       epoch, (int)(*p - start), *p, start, end);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}

/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset, length.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
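/*
 * e.g. with a 4 MB object size, 4 MB stripe unit and stripe count 1,
 * a 1 MB write at file offset 10 MB maps to bno 2 with oxoff 2 MB and
 * oxlen 1 MB.
 */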
void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				   u64 off, u64 *plen,
				   u64 *bno,
				   u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t;

	dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
	     osize, su);
	su_per_object = osize / le32_to_cpu(layout->fl_stripe_unit);
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	BUG_ON((su & ~PAGE_MASK) != 0);
	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*bno = objsetno * sc + stripepos;
	dout("objset %u * sc %u = bno %u\n", objsetno, sc, (unsigned)*bno);

	/* *oxoff = *off % layout->fl_stripe_unit; */
	t = off;
	*oxoff = do_div(t, su);
	*oxlen = min_t(u64, *plen, su - *oxoff);
	*plen = *oxlen;

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
}

/*
 * calculate an object layout (i.e. pgid) from an oid,
 * file_layout, and osdmap
 */
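/*
 * Note: the placement seed (ps) stored in ol_pgid is the raw object
 * name hash; it is reduced modulo the pool's pgp_num later, in
 * calc_pg_raw(), via ceph_stable_mod().
 */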
int ceph_calc_object_layout(struct ceph_object_layout *ol,
			    const char *oid,
			    struct ceph_file_layout *fl,
			    struct ceph_osdmap *osdmap)
{
	unsigned num, num_mask;
	union ceph_pg pgid;
	s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
	int poolid = le32_to_cpu(fl->fl_pg_pool);
	struct ceph_pg_pool_info *pool;

	if (poolid >= osdmap->num_pools)
		return -EIO;
	pool = &osdmap->pg_pool[poolid];

	if (preferred >= 0) {
		num = le32_to_cpu(pool->v.lpg_num);
		num_mask = pool->lpg_num_mask;
	} else {
		num = le32_to_cpu(pool->v.pg_num);
		num_mask = pool->pg_num_mask;
	}

	pgid.pg64 = 0;   /* start with it zeroed out */
	pgid.pg.ps = ceph_full_name_hash(oid, strlen(oid));
	pgid.pg.preferred = preferred;
	pgid.pg.pool = le32_to_cpu(fl->fl_pg_pool);
	if (preferred >= 0)
		dout("calc_object_layout '%s' pgid %d.%xp%d (%llx)\n", oid,
		     pgid.pg.pool, pgid.pg.ps, (int)preferred, pgid.pg64);
	else
		dout("calc_object_layout '%s' pgid %d.%x (%llx)\n", oid,
		     pgid.pg.pool, pgid.pg.ps, pgid.pg64);

	ol->ol_pgid = cpu_to_le64(pgid.pg64);
	ol->ol_stripe_unit = fl->fl_object_stripe_unit;

	return 0;
}

/*
 * Calculate raw osd vector for the given pgid.  Return pointer to osd
 * array, or NULL on failure.
 */
static int *calc_pg_raw(struct ceph_osdmap *osdmap, union ceph_pg pgid,
			int *osds, int *num)
{
	struct rb_node *n = osdmap->pg_temp.rb_node;
	struct ceph_pg_mapping *pg;
	struct ceph_pg_pool_info *pool;
	int ruleno;
	unsigned pps; /* placement ps */

	/* pg_temp? */
	while (n) {
		pg = rb_entry(n, struct ceph_pg_mapping, node);
		if (pgid.pg64 < pg->pgid)
			n = n->rb_left;
		else if (pgid.pg64 > pg->pgid)
			n = n->rb_right;
		else {
			*num = pg->len;
			return pg->osds;
		}
	}

	/* crush */
	if (pgid.pg.pool >= osdmap->num_pools)
		return NULL;
	pool = &osdmap->pg_pool[pgid.pg.pool];
	ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
				 pool->v.type, pool->v.size);
	if (ruleno < 0) {
		pr_err("no crush rule pool %d type %d size %d\n",
		       pgid.pg.pool, pool->v.type, pool->v.size);
		return NULL;
	}

	if (pgid.pg.preferred >= 0)
		pps = ceph_stable_mod(pgid.pg.ps,
				      le32_to_cpu(pool->v.lpgp_num),
				      pool->lpgp_num_mask);
	else
		pps = ceph_stable_mod(pgid.pg.ps,
				      le32_to_cpu(pool->v.pgp_num),
				      pool->pgp_num_mask);
	pps += pgid.pg.pool;
	*num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
			     min_t(int, pool->v.size, *num),
			     pgid.pg.preferred, osdmap->osd_weight);
	return osds;
}

/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, union ceph_pg pgid)
{
	int rawosds[10], *osds;
	int i, num = ARRAY_SIZE(rawosds);

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			return osds[i];
	return -1;
}