/*
 * fs/logfs/segment.c	- Handling the Object Store
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 *
 * Object store or ostore makes up the complete device with exception of
 * the superblock and journal areas.  Apart from its own metadata it stores
 * three kinds of objects: inodes, dentries and blocks, both data and indirect.
 */
#include "logfs.h"

static int logfs_mark_segment_bad(struct super_block *sb, u32 segno)
{
	struct logfs_super *super = logfs_super(sb);
	struct btree_head32 *head = &super->s_reserved_segments;
	int err;

	err = btree_insert32(head, segno, (void *)1, GFP_NOFS);
	if (err)
		return err;
	logfs_super(sb)->s_bad_segments++;
	/* FIXME: write to journal */
	return 0;
}

int logfs_erase_segment(struct super_block *sb, u32 segno, int ensure_erase)
{
	struct logfs_super *super = logfs_super(sb);

	super->s_gec++;

	return super->s_devops->erase(sb, (u64)segno << super->s_segshift,
			super->s_segsize, ensure_erase);
}
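
/*
 * Reserve @bytes in the area's currently open segment and return the
 * device offset at which the caller may write them.  logfs_open_area()
 * switches to a fresh segment first if the current one is too full.
 */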
static s64 logfs_get_free_bytes(struct logfs_area *area, size_t bytes)
{
	s32 ofs;

	logfs_open_area(area, bytes);

	ofs = area->a_used_bytes;
	area->a_used_bytes += bytes;
	BUG_ON(area->a_used_bytes >= logfs_super(area->a_sb)->s_segsize);

	return dev_ofs(area->a_sb, area->a_segno, ofs);
}
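
/*
 * Look up (or create) the mapping-inode page that shadows device offset
 * index << PAGE_SHIFT.  With @use_filler the page is read from the device
 * through s_devops->readpage; otherwise an empty page is created and
 * immediately unlocked.
 */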
static struct page *get_mapping_page(struct super_block *sb, pgoff_t index,
		int use_filler)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = super->s_devops->readpage;
	struct page *page;

	BUG_ON(mapping_gfp_mask(mapping) & __GFP_FS);
	if (use_filler)
		page = read_cache_page(mapping, index, filler, sb);
	else {
		page = find_or_create_page(mapping, index, GFP_NOFS);
		unlock_page(page);
	}
	return page;
}
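
/*
 * Copy @len bytes from @buf into the mapping inode's pages at device
 * offset @ofs, crossing page boundaries as necessary.  The data only
 * reaches the device later, when the area is synced or closed and
 * s_devops->writeseg() is called.
 */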
void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
		int use_filler)
{
	pgoff_t index = ofs >> PAGE_SHIFT;
	struct page *page;
	long offset = ofs & (PAGE_SIZE-1);
	long copylen;

	/* Only logfs_wbuf_recover may use len==0 */
	BUG_ON(!len && !use_filler);
	do {
		copylen = min((ulong)len, PAGE_SIZE - offset);

		page = get_mapping_page(area->a_sb, index, use_filler);
		BUG_ON(!page); /* FIXME: reserve a pool */
		SetPageUptodate(page);
		memcpy(page_address(page) + offset, buf, copylen);
		SetPagePrivate(page);
		page_cache_release(page);

		buf += copylen;
		len -= copylen;
		offset = 0;
		index++;
	} while (len);
}
static void pad_partial_page(struct logfs_area *area)
{
	struct super_block *sb = area->a_sb;
	struct page *page;
	u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
	pgoff_t index = ofs >> PAGE_SHIFT;
	long offset = ofs & (PAGE_SIZE-1);
	u32 len = PAGE_SIZE - offset;

	if (len % PAGE_SIZE) {
		page = get_mapping_page(sb, index, 0);
		BUG_ON(!page); /* FIXME: reserve a pool */
		memset(page_address(page) + offset, 0xff, len);
		SetPagePrivate(page);
		page_cache_release(page);
	}
}

static void pad_full_pages(struct logfs_area *area)
{
	struct super_block *sb = area->a_sb;
	struct logfs_super *super = logfs_super(sb);
	u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
	u32 len = super->s_segsize - area->a_used_bytes;
	pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
	pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (no_indizes) {
		page = get_mapping_page(sb, index, 0);
		BUG_ON(!page); /* FIXME: reserve a pool */
		SetPageUptodate(page);
		memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
		SetPagePrivate(page);
		page_cache_release(page);
		index++;
		no_indizes--;
	}
}

/*
 * bdev_writeseg will write full pages.  Memset the tail to prevent data leaks.
 * Also make sure we allocate (and memset) all pages for final writeout.
 */
static void pad_wbuf(struct logfs_area *area, int final)
{
	pad_partial_page(area);
	if (final)
		pad_full_pages(area);
}
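
/*
 * Worked example for the padding above (illustrative numbers only):
 * with PAGE_SIZE == 4096 and a_used_bytes == 5000, pad_partial_page()
 * fills segment bytes 5000..8191 with 0xff so the partially used page can
 * be written out whole; on final writeout pad_full_pages() additionally
 * allocates and fills every remaining page up to s_segsize.
 */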
/*
 * We have to be careful with the alias tree.  Since lookup is done by bix,
 * it needs to be normalized, so 14, 15, 16, etc. all match when dealing with
 * indirect blocks.  So always use it through accessor functions.
 */
static void *alias_tree_lookup(struct super_block *sb, u64 ino, u64 bix,
		level_t level)
{
	struct btree_head128 *head = &logfs_super(sb)->s_object_alias_tree;
	pgoff_t index = logfs_pack_index(bix, level);

	return btree_lookup128(head, ino, index);
}

static int alias_tree_insert(struct super_block *sb, u64 ino, u64 bix,
		level_t level, void *val)
{
	struct btree_head128 *head = &logfs_super(sb)->s_object_alias_tree;
	pgoff_t index = logfs_pack_index(bix, level);

	return btree_insert128(head, ino, index, val, GFP_NOFS);
}
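
/*
 * Note: the 128-bit btree key used above is (ino, logfs_pack_index(bix,
 * level)); packing bix together with the level is what provides the
 * normalization described in the comment above, so any data position
 * covered by the same indirect block resolves to the same key.
 */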
static int btree_write_alias(struct super_block *sb, struct logfs_block *block,
		write_alias_t *write_one_alias)
{
	struct object_alias_item *item;
	int err;

	list_for_each_entry(item, &block->item_list, list) {
		err = write_alias_journal(sb, block->ino, block->bix,
				block->level, item->child_no, item->val);
		if (err)
			return err;
	}
	return 0;
}

static gc_level_t btree_block_level(struct logfs_block *block)
{
	return expand_level(block->ino, block->level);
}

static struct logfs_block_ops btree_block_ops = {
	.write_block = btree_write_block,
	.block_level = btree_block_level,
	.free_block = __free_block,
	.write_alias = btree_write_alias,
};

int logfs_load_object_aliases(struct super_block *sb,
		struct logfs_obj_alias *oa, int count)
{
	struct logfs_super *super = logfs_super(sb);
	struct logfs_block *block;
	struct object_alias_item *item;
	u64 ino, bix;
	level_t level;
	int i, err;

	super->s_flags |= LOGFS_SB_FLAG_OBJ_ALIAS;
	count /= sizeof(*oa);
	for (i = 0; i < count; i++) {
		item = mempool_alloc(super->s_alias_pool, GFP_NOFS);
		if (!item)
			return -ENOMEM;
		memset(item, 0, sizeof(*item));

		super->s_no_object_aliases++;
		item->val = oa[i].val;
		item->child_no = be16_to_cpu(oa[i].child_no);

		ino = be64_to_cpu(oa[i].ino);
		bix = be64_to_cpu(oa[i].bix);
		level = LEVEL(oa[i].level);

		log_aliases("logfs_load_object_aliases(%llx, %llx, %x, %x) %llx\n",
				ino, bix, level, item->child_no,
				be64_to_cpu(item->val));
		block = alias_tree_lookup(sb, ino, bix, level);
		if (!block) {
			block = __alloc_block(sb, ino, bix, level);
			block->ops = &btree_block_ops;
			err = alias_tree_insert(sb, ino, bix, level, block);
			BUG_ON(err); /* mempool empty */
		}
		if (test_and_set_bit(item->child_no, block->alias_map)) {
			printk(KERN_ERR"LogFS: Alias collision detected\n");
			return -EIO;
		}
		list_move_tail(&block->alias_list, &super->s_object_alias);
		list_add(&item->list, &block->item_list);
	}
	return 0;
}

static void kill_alias(void *_block, unsigned long ignore0,
		u64 ignore1, u64 ignore2, size_t ignore3)
{
	struct logfs_block *block = _block;
	struct super_block *sb = block->sb;
	struct logfs_super *super = logfs_super(sb);
	struct object_alias_item *item;

	while (!list_empty(&block->item_list)) {
		item = list_entry(block->item_list.next, typeof(*item), list);
		list_del(&item->list);
		mempool_free(item, super->s_alias_pool);
	}
	block->ops->free_block(sb, block);
}

static int obj_type(struct inode *inode, level_t level)
{
	if (level == 0) {
		if (S_ISDIR(inode->i_mode))
			return OBJ_DENTRY;
		if (inode->i_ino == LOGFS_INO_MASTER)
			return OBJ_INODE;
	}
	return OBJ_BLOCK;
}

static int obj_len(struct super_block *sb, int obj_type)
{
	switch (obj_type) {
	case OBJ_DENTRY:
		return sizeof(struct logfs_disk_dentry);
	case OBJ_INODE:
		return sizeof(struct logfs_disk_inode);
	case OBJ_BLOCK:
		return sb->s_blocksize;
	default:
		BUG();
	}
}
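
/*
 * On the medium every object is a struct logfs_object_header immediately
 * followed by its (possibly compressed) payload.  h.crc protects the
 * header, h.data_crc the payload; read_obj_header() and
 * __logfs_segment_read() verify both on the read path.
 */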
static int __logfs_segment_write(struct inode *inode, void *buf,
		struct logfs_shadow *shadow, int type, int len, int compr)
{
	struct logfs_area *area;
	struct super_block *sb = inode->i_sb;
	s64 ofs;
	struct logfs_object_header h;
	int acc_len;

	if (shadow->gc_level == 0)
		acc_len = len;
	else
		acc_len = obj_len(sb, type);

	area = get_area(sb, shadow->gc_level);
	ofs = logfs_get_free_bytes(area, len + LOGFS_OBJECT_HEADERSIZE);
	LOGFS_BUG_ON(ofs <= 0, sb);
	/*
	 * Order is important.  logfs_get_free_bytes(), by modifying the
	 * segment file, may modify the content of the very page we're about
	 * to write now.  Which is fine, as long as the calculated crc and
	 * written data still match.  So do the modifications _before_
	 * calculating the crc.
	 */

	h.len = cpu_to_be16(len);
	h.type = type;
	h.compr = compr;
	h.ino = cpu_to_be64(inode->i_ino);
	h.bix = cpu_to_be64(shadow->bix);
	h.crc = logfs_crc32(&h, sizeof(h) - 4, 4);
	h.data_crc = logfs_crc32(buf, len, 0);

	logfs_buf_write(area, ofs, &h, sizeof(h));
	logfs_buf_write(area, ofs + LOGFS_OBJECT_HEADERSIZE, buf, len);

	shadow->new_ofs = ofs;
	shadow->new_len = acc_len + LOGFS_OBJECT_HEADERSIZE;

	return 0;
}

static s64 logfs_segment_write_compress(struct inode *inode, void *buf,
		struct logfs_shadow *shadow, int type, int len)
{
	struct super_block *sb = inode->i_sb;
	void *compressor_buf = logfs_super(sb)->s_compressed_je;
	ssize_t compr_len;
	int ret;

	mutex_lock(&logfs_super(sb)->s_journal_mutex);
	compr_len = logfs_compress(buf, compressor_buf, len, len);

	if (compr_len >= 0) {
		ret = __logfs_segment_write(inode, compressor_buf, shadow,
				type, compr_len, COMPR_ZLIB);
	} else {
		ret = __logfs_segment_write(inode, buf, shadow, type, len,
				COMPR_NONE);
	}
	mutex_unlock(&logfs_super(sb)->s_journal_mutex);
	return ret;
}
/**
 * logfs_segment_write - write data block to object store
 * @inode: inode containing data
 * @page: page holding the block to be written
 * @shadow: shadow entry tracking old and new position of the block
 *
 * Returns an errno or zero.
 */
int logfs_segment_write(struct inode *inode, struct page *page,
		struct logfs_shadow *shadow)
{
	struct super_block *sb = inode->i_sb;
	struct logfs_super *super = logfs_super(sb);
	int do_compress, type, len;
	int ret;
	void *buf;

	super->s_flags |= LOGFS_SB_FLAG_DIRTY;
	BUG_ON(super->s_flags & LOGFS_SB_FLAG_SHUTDOWN);
	do_compress = logfs_inode(inode)->li_flags & LOGFS_IF_COMPRESSED;
	if (shadow->gc_level != 0) {
		/* temporarily disable compression for indirect blocks */
		do_compress = 0;
	}

	type = obj_type(inode, shrink_level(shadow->gc_level));
	len = obj_len(sb, type);
	buf = kmap(page);
	if (do_compress)
		ret = logfs_segment_write_compress(inode, buf, shadow, type,
				len);
	else
		ret = __logfs_segment_write(inode, buf, shadow, type, len,
				COMPR_NONE);
	kunmap(page);

	log_segment("logfs_segment_write(%llx, %llx, %x) %llx->%llx %x->%x\n",
			shadow->ino, shadow->bix, shadow->gc_level,
			shadow->old_ofs, shadow->new_ofs,
			shadow->old_len, shadow->new_len);
	/* this BUG_ON did catch a locking bug.  useful */
	BUG_ON(!(shadow->new_ofs & (super->s_segsize - 1)));
	return ret;
}
int wbuf_read(struct super_block *sb, u64 ofs, size_t len, void *buf)
{
	pgoff_t index = ofs >> PAGE_SHIFT;
	struct page *page;
	long offset = ofs & (PAGE_SIZE-1);
	long copylen;

	while (len) {
		copylen = min((ulong)len, PAGE_SIZE - offset);

		page = get_mapping_page(sb, index, 1);
		if (IS_ERR(page))
			return PTR_ERR(page);
		memcpy(buf, page_address(page) + offset, copylen);
		page_cache_release(page);

		buf += copylen;
		len -= copylen;
		offset = 0;
		index++;
	}
	return 0;
}

/*
 * The "position" of indirect blocks is ambiguous.  It can be the position
 * of any data block somewhere behind this indirect block.  So we need to
 * normalize the positions through logfs_block_mask() before comparing.
 */
static int check_pos(struct super_block *sb, u64 pos1, u64 pos2, level_t level)
{
	return	(pos1 & logfs_block_mask(sb, level)) !=
		(pos2 & logfs_block_mask(sb, level));
}
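
/*
 * Illustration (assuming 512 pointers per block, i.e. 9 index bits per
 * level): at level 1 the mask discards the low 9 bits, so positions 14,
 * 15 and 16 compare equal and are treated as the same indirect block,
 * while at level 0 a position only matches itself.
 */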
#if 0
static int read_seg_header(struct super_block *sb, u64 ofs,
		struct logfs_segment_header *sh)
{
	__be32 crc;
	int err;

	err = wbuf_read(sb, ofs, sizeof(*sh), sh);
	if (err)
		return err;
	crc = logfs_crc32(sh, sizeof(*sh), 4);
	if (crc != sh->crc) {
		printk(KERN_ERR"LOGFS: header crc error at %llx: expected %x, "
				"got %x\n", ofs, be32_to_cpu(sh->crc),
				be32_to_cpu(crc));
		return -EIO;
	}
	return 0;
}
#endif

static int read_obj_header(struct super_block *sb, u64 ofs,
		struct logfs_object_header *oh)
{
	__be32 crc;
	int err;

	err = wbuf_read(sb, ofs, sizeof(*oh), oh);
	if (err)
		return err;
	crc = logfs_crc32(oh, sizeof(*oh) - 4, 4);
	if (crc != oh->crc) {
		printk(KERN_ERR"LOGFS: header crc error at %llx: expected %x, "
				"got %x\n", ofs, be32_to_cpu(oh->crc),
				be32_to_cpu(crc));
		return -EIO;
	}
	return 0;
}

static void move_btree_to_page(struct inode *inode, struct page *page,
		__be64 *data)
{
	struct super_block *sb = inode->i_sb;
	struct logfs_super *super = logfs_super(sb);
	struct btree_head128 *head = &super->s_object_alias_tree;
	struct logfs_block *block;
	struct object_alias_item *item, *next;

	if (!(super->s_flags & LOGFS_SB_FLAG_OBJ_ALIAS))
		return;

	block = btree_remove128(head, inode->i_ino, page->index);
	if (!block)
		return;

	log_blockmove("move_btree_to_page(%llx, %llx, %x)\n",
			block->ino, block->bix, block->level);
	list_for_each_entry_safe(item, next, &block->item_list, list) {
		data[item->child_no] = item->val;
		list_del(&item->list);
		mempool_free(item, super->s_alias_pool);
	}
	block->page = page;
	SetPagePrivate(page);
	page->private = (unsigned long)block;
	block->ops = &indirect_block_ops;
	initialize_block_counters(page, block, data, 0);
}

/*
 * This silences a false, yet annoying gcc warning.  I hate it when my editor
 * jumps into bitops.h each time I recompile this file.
 * TODO: Complain to gcc folks about this and upgrade compiler.
 */
static unsigned long fnb(const unsigned long *addr,
		unsigned long size, unsigned long offset)
{
	return find_next_bit(addr, size, offset);
}

void move_page_to_btree(struct page *page)
{
	struct logfs_block *block = logfs_block(page);
	struct super_block *sb = block->sb;
	struct logfs_super *super = logfs_super(sb);
	struct object_alias_item *item;
	unsigned long pos;
	__be64 *child;
	int err;

	if (super->s_flags & LOGFS_SB_FLAG_SHUTDOWN) {
		block->ops->free_block(sb, block);
		return;
	}
	log_blockmove("move_page_to_btree(%llx, %llx, %x)\n",
			block->ino, block->bix, block->level);
	super->s_flags |= LOGFS_SB_FLAG_OBJ_ALIAS;

	for (pos = 0; ; pos++) {
		pos = fnb(block->alias_map, LOGFS_BLOCK_FACTOR, pos);
		if (pos >= LOGFS_BLOCK_FACTOR)
			break;

		item = mempool_alloc(super->s_alias_pool, GFP_NOFS);
		BUG_ON(!item); /* mempool empty */
		memset(item, 0, sizeof(*item));

		child = kmap_atomic(page, KM_USER0);
		item->val = child[pos];
		kunmap_atomic(child, KM_USER0);
		item->child_no = pos;
		list_add(&item->list, &block->item_list);
	}
	block->page = NULL;
	ClearPagePrivate(page);
	page->private = 0;
	block->ops = &btree_block_ops;
	err = alias_tree_insert(block->sb, block->ino, block->bix, block->level,
			block);
	BUG_ON(err); /* mempool empty */
	ClearPageUptodate(page);
}

static int __logfs_segment_read(struct inode *inode, void *buf,
		u64 ofs, u64 bix, level_t level)
{
	struct super_block *sb = inode->i_sb;
	void *compressor_buf = logfs_super(sb)->s_compressed_je;
	struct logfs_object_header oh;
	__be32 crc;
	u16 len;
	int err, block_len;

	block_len = obj_len(sb, obj_type(inode, level));
	err = read_obj_header(sb, ofs, &oh);
	if (err)
		goto out_err;

	err = -EIO;
	if (be64_to_cpu(oh.ino) != inode->i_ino
			|| check_pos(sb, be64_to_cpu(oh.bix), bix, level)) {
		printk(KERN_ERR"LOGFS: (ino, bix) don't match at %llx: "
				"expected (%lx, %llx), got (%llx, %llx)\n",
				ofs, inode->i_ino, bix,
				be64_to_cpu(oh.ino), be64_to_cpu(oh.bix));
		goto out_err;
	}

	len = be16_to_cpu(oh.len);

	switch (oh.compr) {
	case COMPR_NONE:
		err = wbuf_read(sb, ofs + LOGFS_OBJECT_HEADERSIZE, len, buf);
		if (err)
			goto out_err;
		crc = logfs_crc32(buf, len, 0);
		if (crc != oh.data_crc) {
			printk(KERN_ERR"LOGFS: uncompressed data crc error at "
					"%llx: expected %x, got %x\n", ofs,
					be32_to_cpu(oh.data_crc),
					be32_to_cpu(crc));
			goto out_err;
		}
		break;
	case COMPR_ZLIB:
		mutex_lock(&logfs_super(sb)->s_journal_mutex);
		err = wbuf_read(sb, ofs + LOGFS_OBJECT_HEADERSIZE, len,
				compressor_buf);
		if (err) {
			mutex_unlock(&logfs_super(sb)->s_journal_mutex);
			goto out_err;
		}
		crc = logfs_crc32(compressor_buf, len, 0);
		if (crc != oh.data_crc) {
			printk(KERN_ERR"LOGFS: compressed data crc error at "
					"%llx: expected %x, got %x\n", ofs,
					be32_to_cpu(oh.data_crc),
					be32_to_cpu(crc));
			mutex_unlock(&logfs_super(sb)->s_journal_mutex);
			goto out_err;
		}
		err = logfs_uncompress(compressor_buf, buf, len, block_len);
		mutex_unlock(&logfs_super(sb)->s_journal_mutex);
		if (err) {
			printk(KERN_ERR"LOGFS: uncompress error at %llx\n", ofs);
			goto out_err;
		}
		break;
	default:
		LOGFS_BUG(sb);
		err = -EIO;
		goto out_err;
	}
	return 0;

out_err:
	logfs_set_ro(sb);
	printk(KERN_ERR"LOGFS: device is read-only now\n");
	LOGFS_BUG(sb);
	return err;
}
/**
 * logfs_segment_read - read data block from object store
 * @inode: inode containing data
 * @page: page the data block is read into
 * @ofs: physical data offset
 * @bix: block index
 * @level: block level
 *
 * Returns 0 on success or a negative errno.
 */
int logfs_segment_read(struct inode *inode, struct page *page,
		u64 ofs, u64 bix, level_t level)
{
	int err;
	void *buf;

	if (PageUptodate(page))
		return 0;

	ofs &= ~LOGFS_FULLY_POPULATED;

	buf = kmap(page);
	err = __logfs_segment_read(inode, buf, ofs, bix, level);
	if (!err) {
		move_btree_to_page(inode, page, buf);
		SetPageUptodate(page);
	}
	kunmap(page);
	log_segment("logfs_segment_read(%lx, %llx, %x) %llx (%d)\n",
			inode->i_ino, bix, level, ofs, err);
	return err;
}
int logfs_segment_delete(struct inode *inode, struct logfs_shadow *shadow)
{
	struct super_block *sb = inode->i_sb;
	struct logfs_super *super = logfs_super(sb);
	struct logfs_object_header h;
	u16 len;
	int err;

	super->s_flags |= LOGFS_SB_FLAG_DIRTY;
	BUG_ON(super->s_flags & LOGFS_SB_FLAG_SHUTDOWN);
	BUG_ON(shadow->old_ofs & LOGFS_FULLY_POPULATED);
	if (!shadow->old_ofs)
		return 0;

	log_segment("logfs_segment_delete(%llx, %llx, %x) %llx->%llx %x->%x\n",
			shadow->ino, shadow->bix, shadow->gc_level,
			shadow->old_ofs, shadow->new_ofs,
			shadow->old_len, shadow->new_len);
	err = read_obj_header(sb, shadow->old_ofs, &h);
	LOGFS_BUG_ON(err, sb);
	LOGFS_BUG_ON(be64_to_cpu(h.ino) != inode->i_ino, sb);
	LOGFS_BUG_ON(check_pos(sb, shadow->bix, be64_to_cpu(h.bix),
				shrink_level(shadow->gc_level)), sb);

	if (shadow->gc_level == 0)
		len = be16_to_cpu(h.len);
	else
		len = obj_len(sb, h.type);
	shadow->old_len = len + sizeof(h);
	return 0;
}
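
/*
 * Forget the cached state for segment @segno: clear the private flag on
 * every mapping-inode page covering the segment so the pages can be
 * reclaimed normally.
 */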
void freeseg(struct super_block *sb, u32 segno)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct page *page;
	u64 ofs, start, end;

	start = dev_ofs(sb, segno, 0);
	end = dev_ofs(sb, segno + 1, 0);
	for (ofs = start; ofs < end; ofs += PAGE_SIZE) {
		page = find_get_page(mapping, ofs >> PAGE_SHIFT);
		if (!page)
			continue;
		ClearPagePrivate(page);
		page_cache_release(page);
	}
}

int logfs_open_area(struct logfs_area *area, size_t bytes)
{
	struct super_block *sb = area->a_sb;
	struct logfs_super *super = logfs_super(sb);
	int err, closed = 0;

	if (area->a_is_open && area->a_used_bytes + bytes <= super->s_segsize)
		return 0;

	if (area->a_is_open) {
		u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
		u32 len = super->s_segsize - area->a_written_bytes;

		log_gc("logfs_close_area(%x)\n", area->a_segno);
		pad_wbuf(area, 1);
		super->s_devops->writeseg(area->a_sb, ofs, len);
		freeseg(sb, area->a_segno);
		closed = 1;
	}

	area->a_used_bytes = 0;
	area->a_written_bytes = 0;
again:
	area->a_ops->get_free_segment(area);
	area->a_ops->get_erase_count(area);

	log_gc("logfs_open_area(%x, %x)\n", area->a_segno, area->a_level);
	err = area->a_ops->erase_segment(area);
	if (err) {
		printk(KERN_WARNING "LogFS: Error erasing segment %x\n",
				area->a_segno);
		logfs_mark_segment_bad(sb, area->a_segno);
		goto again;
	}
	area->a_is_open = 1;
	return closed;
}
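
/*
 * Write out the bytes buffered in the area but not yet on the device.
 * On media with a minimum write size the length is rounded down to a
 * multiple of s_writesize; the unaligned tail stays buffered until more
 * data arrives or the segment is closed.
 */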
void logfs_sync_area(struct logfs_area *area)
{
	struct super_block *sb = area->a_sb;
	struct logfs_super *super = logfs_super(sb);
	u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
	u32 len = (area->a_used_bytes - area->a_written_bytes);

	if (super->s_writesize)
		len &= ~(super->s_writesize - 1);
	if (len == 0)
		return;
	pad_wbuf(area, 0);
	super->s_devops->writeseg(sb, ofs, len);
	area->a_written_bytes += len;
}

void logfs_sync_segments(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	int i;

	for_each_area(i)
		logfs_sync_area(super->s_area[i]);
}

/*
 * Pick a free segment to be used for this area.  Effectively takes a
 * candidate from the free list (not really a candidate anymore).
 */
static void ostore_get_free_segment(struct logfs_area *area)
{
	struct super_block *sb = area->a_sb;
	struct logfs_super *super = logfs_super(sb);

	if (super->s_free_list.count == 0) {
		printk(KERN_ERR"LOGFS: ran out of free segments\n");
		LOGFS_BUG(sb);
	}
	area->a_segno = get_best_cand(sb, &super->s_free_list, NULL);
}
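
/*
 * The segment entry's ec_level field packs erase count and GC level into
 * one 32-bit value; judging by the shift below, the low four bits hold
 * the level and the remaining bits the erase count, which is bumped here
 * because the segment is about to be erased once more.
 */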
static void ostore_get_erase_count(struct logfs_area *area)
{
	struct logfs_segment_entry se;
	u32 ec_level;

	logfs_get_segment_entry(area->a_sb, area->a_segno, &se);
	BUG_ON(se.ec_level == cpu_to_be32(BADSEG) ||
			se.valid == cpu_to_be32(RESERVED));

	ec_level = be32_to_cpu(se.ec_level);
	area->a_erase_count = (ec_level >> 4) + 1;
}

static int ostore_erase_segment(struct logfs_area *area)
{
	struct super_block *sb = area->a_sb;
	struct logfs_segment_header sh;
	u64 ofs;
	int err;

	err = logfs_erase_segment(sb, area->a_segno, 0);
	if (err)
		return err;

	sh.pad = 0;
	sh.type = SEG_OSTORE;
	sh.level = (__force u8)area->a_level;
	sh.segno = cpu_to_be32(area->a_segno);
	sh.ec = cpu_to_be32(area->a_erase_count);
	sh.gec = cpu_to_be64(logfs_super(sb)->s_gec);
	sh.crc = logfs_crc32(&sh, sizeof(sh), 4);

	logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count,
			area->a_level);

	ofs = dev_ofs(sb, area->a_segno, 0);
	area->a_used_bytes = sizeof(sh);
	logfs_buf_write(area, ofs, &sh, sizeof(sh));
	return 0;
}

static const struct logfs_area_ops ostore_area_ops = {
	.get_free_segment = ostore_get_free_segment,
	.get_erase_count = ostore_get_erase_count,
	.erase_segment = ostore_erase_segment,
};

static void free_area(struct logfs_area *area)
{
	if (area)
		freeseg(area->a_sb, area->a_segno);
	kfree(area);
}

static struct logfs_area *alloc_area(struct super_block *sb)
{
	struct logfs_area *area;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;

	area->a_sb = sb;
	return area;
}

static void map_invalidatepage(struct page *page, unsigned long l)
{
	BUG();
}

static int map_releasepage(struct page *page, gfp_t g)
{
	/* Don't release these pages */
	return 0;
}

static const struct address_space_operations mapping_aops = {
	.invalidatepage = map_invalidatepage,
	.releasepage = map_releasepage,
	.set_page_dirty = __set_page_dirty_nobuffers,
};

int logfs_init_mapping(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping;
	struct inode *inode;

	inode = logfs_new_meta_inode(sb, LOGFS_INO_MAPPING);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	super->s_mapping_inode = inode;
	mapping = inode->i_mapping;
	mapping->a_ops = &mapping_aops;
	/* Would it be possible to use __GFP_HIGHMEM as well? */
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	return 0;
}

int logfs_init_areas(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	int i = -1;

	super->s_alias_pool = mempool_create_kmalloc_pool(600,
			sizeof(struct object_alias_item));
	if (!super->s_alias_pool)
		return -ENOMEM;

	super->s_journal_area = alloc_area(sb);
	if (!super->s_journal_area)
		goto err;

	for_each_area(i) {
		super->s_area[i] = alloc_area(sb);
		if (!super->s_area[i])
			goto err;
		super->s_area[i]->a_level = GC_LEVEL(i);
		super->s_area[i]->a_ops = &ostore_area_ops;
	}
	btree_init_mempool128(&super->s_object_alias_tree,
			super->s_btree_pool);
	return 0;

err:
	for (i--; i >= 0; i--)
		free_area(super->s_area[i]);
	free_area(super->s_journal_area);
	mempool_destroy(super->s_alias_pool);
	return -ENOMEM;
}

void logfs_cleanup_areas(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);
	int i;

	btree_grim_visitor128(&super->s_object_alias_tree, 0, kill_alias);
	for_each_area(i)
		free_area(super->s_area[i]);
	free_area(super->s_journal_area);
	destroy_meta_inode(super->s_mapping_inode);
}