/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"

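/*
 * Chunks currently map to a single stripe on a single device, so a
 * mapping is just the device and the physical start of the chunk on
 * it.  The pointer is stashed in extent_map->bdev (cast to a struct
 * block_device *) so it can ride along in the extent map tree.
 */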
struct map_lookup {
	struct btrfs_device *dev;
	u64 physical;
};

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */
	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);
		if (key.objectid < device->devid)
			goto next;
		if (key.objectid > device->devid)
			goto no_more_items;
		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/*
	 * we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);
	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}

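/*
 * Find num_bytes of free space on the device and record it as a dev
 * extent item, keyed by (devid, start offset) and owned by 'owner'.
 */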
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 owner, u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret)
		goto err;

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_owner(leaf, extent, owner);
	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

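/*
 * Chunk items are keyed by logical start, so the next free logical
 * address is the start of the last chunk plus its length, or 0 if
 * there are no chunks yet.
 */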
static int find_next_chunk(struct btrfs_root *root, u64 *objectid)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = (u64)-1;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*objectid = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.objectid + found_key.offset;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

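/*
 * Find the lowest unused devid: one past the highest existing device
 * item, or 1 when the tree holds no device items yet.
 */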
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item) + device->name_len);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_rdev(leaf, dev_item, device->rdev);
	btrfs_set_device_partition(leaf, dev_item, device->partition);
	btrfs_set_device_name_len(leaf, dev_item, device->name_len);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);

	ptr = (unsigned long)btrfs_device_name(dev_item);
	write_extent_buffer(leaf, device->name, ptr, device->name_len);
	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

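/*
 * Write back the in-memory fields of an existing device item.  The
 * item is looked up in the chunk root by devid; -ENOENT is returned
 * if it isn't there.
 */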
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_rdev(leaf, dev_item, device->rdev);
	btrfs_set_device_partition(leaf, dev_item, device->partition);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

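/*
 * Append a (disk key, chunk item) pair to the sys_chunk_array packed
 * into the super block copy and bump the recorded array size.  Fails
 * with -EFBIG once the array is full.
 */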
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

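/*
 * Allocate a new chunk: pick devices with enough free space, reserve a
 * dev extent on each, insert the chunk item into the chunk root and
 * cache the resulting logical->physical mapping in the mapping tree.
 * On success *start is the logical address and *num_bytes the size of
 * the new chunk.
 */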
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list = &extent_root->fs_info->devices;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 avail;
	u64 max_avail = 0;
	int num_stripes = 1;
	int looped = 0;
	int ret;
	int index;
	struct btrfs_key key;

	if (list_empty(dev_list))
		return -ENOSPC;
again:
	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;
	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		avail = device->total_bytes - device->bytes_used;
		cur = cur->next;
		if (avail > max_avail)
			max_avail = avail;
		if (avail >= calc_size) {
			list_move_tail(&device->dev_list, &private_devs);
			index++;
		}
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (!looped && max_avail > 0) {
			/* retry once with the largest hole we found */
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		return -ENOSPC;
	}

	ret = find_next_chunk(chunk_root, &key.objectid);
	if (ret)
		return ret;

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	stripes = &chunk->stripe;

	*num_bytes = calc_size;
	index = 0;
	while (index < num_stripes) {
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_list);
		list_move_tail(&device->dev_list, dev_list);
		ret = btrfs_alloc_dev_extent(trans, device,
					     key.objectid,
					     calc_size, &dev_offset);
		BUG_ON(ret);
		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);
		btrfs_set_stack_stripe_devid(stripes + index, device->devid);
		btrfs_set_stack_stripe_offset(stripes + index, dev_offset);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key.objectid was set above */
	key.offset = *num_bytes;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, 64 * 1024);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_io_width(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.objectid;

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		kfree(chunk);
		return -ENOMEM;
	}
	map = kmalloc(sizeof(*map), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		kfree(chunk);
		return -ENOMEM;
	}
	em->bdev = (struct block_device *)map;
	em->start = key.objectid;
	em->len = key.offset;
	em->block_start = 0;

	map->physical = physical;
	map->dev = device;
	if (!map->dev) {
		kfree(map);
		free_extent_map(em);
		kfree(chunk);
		return -EIO;
	}
	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	BUG_ON(ret);
	spin_unlock(&em_tree->lock);
	free_extent_map(em);
	return ret;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

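/*
 * Translate a logical address to (device, physical offset).  *length
 * is in/out: the caller passes the size it cares about and gets back
 * how many bytes are contiguous on the returned device starting at
 * *phys.  Typical use (see btrfs_map_bio below):
 *
 *	map_length = length;
 *	ret = btrfs_map_block(map_tree, logical, &physical,
 *			      &map_length, &dev);
 */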
int btrfs_map_block(struct btrfs_mapping_tree *map_tree,
		    u64 logical, u64 *phys, u64 *length,
		    struct btrfs_device **dev)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;
	*phys = map->physical + offset;
	*length = em->len - offset;
	*dev = map->dev;
	free_extent_map(em);
	spin_unlock(&em_tree->lock);
	return 0;
}

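/*
 * Remap a bio from the filesystem's logical address space onto the
 * backing device and submit it.  The whole bio must map to a single
 * contiguous stretch on one device (enforced by the BUG_ON below).
 */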
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 physical;
	u64 length = 0;
	u64 map_length;
	struct bio_vec *bvec;
	int i;
	int ret;

	bio_for_each_segment(bvec, bio, i) {
		length += bvec->bv_len;
	}
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, logical, &physical, &map_length, &dev);
	BUG_ON(map_length < length);

	bio->bi_sector = physical >> 9;
	bio->bi_bdev = dev->bdev;
	submit_bio(rw, bio);
	return 0;
}

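/*
 * Linear scan of the fs_info device list for a given devid; returns
 * NULL if no device with that id is present.
 */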
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid)
{
	struct btrfs_device *dev;
	struct list_head *cur = root->fs_info->devices.next;
	struct list_head *head = &root->fs_info->devices;

	while (cur != head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid)
			return dev;
		cur = cur->next;
	}
	return NULL;
}

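/*
 * Turn an on-disk chunk item into an extent_map in the mapping tree.
 * The chunk key gives the logical start and length; the first stripe
 * gives the device and physical offset.  Chunks that are already
 * cached are skipped.
 */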
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	int ret;

	logical = key->objectid;
	length = key->offset;

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		spin_unlock(&map_tree->map_tree.lock);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}
	spin_unlock(&map_tree->map_tree.lock);

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}
	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;

	map->physical = btrfs_stripe_offset_nr(leaf, chunk, 0);
	devid = btrfs_stripe_devid_nr(leaf, chunk, 0);
	map->dev = btrfs_find_device(root, devid);
	if (!map->dev) {
		kfree(map);
		free_extent_map(em);
		return -EIO;
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	BUG_ON(ret);
	spin_unlock(&map_tree->map_tree.lock);
	free_extent_map(em);
	return 0;
}

static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;
	char *name;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	device->rdev = btrfs_device_rdev(leaf, dev_item);
	device->partition = btrfs_device_partition(leaf, dev_item);
	device->name_len = btrfs_device_name_len(leaf, dev_item);
	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);

	name = kmalloc(device->name_len + 1, GFP_NOFS);
	if (!name)
		return -ENOMEM;
	device->name = name;
	ptr = (unsigned long)btrfs_device_name(dev_item);
	read_extent_buffer(leaf, name, ptr, device->name_len);
	name[device->name_len] = '\0';
	return 0;
}

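/*
 * Read a device item into an in-memory btrfs_device, allocating a new
 * struct and adding it to the fs_info device list if this devid hasn't
 * been seen yet.
 */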
static int read_one_dev(struct btrfs_root *root, struct btrfs_key *key,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;

	devid = btrfs_device_id(leaf, dev_item);
	device = btrfs_find_device(root, devid);
	if (!device) {
		device = kmalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		list_add(&device->dev_list, &root->fs_info->devices);
	}
	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = root->fs_info->sb->s_bdev;
	memcpy(&device->dev_key, key, sizeof(*key));
	ret = 0;
#if 0
	ret = btrfs_open_device(device);
	if (ret) {
		kfree(device);
	}
#endif
	return ret;
}

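/*
 * The sys_chunk_array in the super block is a packed sequence of
 * (disk key, item) pairs.  Walk it to populate the in-memory device
 * and chunk state needed before the chunk tree itself can be read.
 */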
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb = root->fs_info->sb_buffer;
	struct btrfs_disk_key *disk_key;
	struct btrfs_dev_item *dev_item;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u8 *ptr;
	unsigned long sb_ptr;
	u32 cur;
	int ret;
	int dev_only = 1;

	array_size = btrfs_super_sys_array_size(super_copy);

	/*
	 * we do this loop twice, once for the device items and
	 * once for all of the chunks.  This way there are device
	 * structs filled in for every chunk
	 */
again:
	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID &&
		    key.type == BTRFS_DEV_ITEM_KEY) {
			dev_item = (struct btrfs_dev_item *)sb_ptr;
			if (dev_only) {
				ret = read_one_dev(root, &key, sb, dev_item);
				BUG_ON(ret);
			}
			len = sizeof(*dev_item);
			len += btrfs_device_name_len(sb, dev_item);
		} else if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			if (!dev_only) {
				ret = read_one_chunk(root, &key, sb, chunk);
				BUG_ON(ret);
			}
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			BUG();
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	if (dev_only == 1) {
		dev_only = 0;
		goto again;
	}
	return 0;
}

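/*
 * Load every device item and chunk item from the chunk tree into the
 * in-memory device list and mapping tree; meant to run after
 * btrfs_read_sys_array has bootstrapped the system chunks.
 */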
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, &found_key, leaf,
						   dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}