/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}
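
/*
 * Slow path of lock_metapage(): sleep until META_locked is released.
 * The page lock is dropped while sleeping so the current holder can make
 * progress, and re-taken before retrying.
 */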
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);

	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
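
/*
 * When the page size is larger than the 4K metapage size, several metapages
 * share one page; a meta_anchor hung off page->private tracks them and counts
 * the I/O in flight on the page.
 */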
#if MPS_PER_PAGE > 1
struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}

static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}

#else
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)
#endif

static void init_once(void *foo)
{
	struct metapage *mp = (struct metapage *)foo;

	mp->lid = 0;
	mp->lsn = 0;
	mp->flag = 0;
	mp->data = NULL;
	mp->clsn = 0;
	mp->log = NULL;
	set_bit(META_free, &mp->flag);
	init_waitqueue_head(&mp->wait);
}

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	return mempool_alloc(metapage_mempool, gfp_mask);
}

static inline void free_metapage(struct metapage *mp)
{
	mp->flag = 0;
	set_bit(META_free, &mp->flag);

	mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, init_once);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}
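
/*
 * Free the metapage only if nothing references it and no dirty data or I/O
 * is outstanding; called with mp->page locked.
 */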
static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}

/*
 * Metapage address space operations
 */
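
/*
 * Map a logical block of a metadata inode to its on-disk block via xtLookup();
 * a return value of zero means the block is not mapped.  The fake direct inode
 * (i_ino == 0) maps one-to-one onto the block device, so lblock is returned
 * unchanged.
 */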
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}

static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio, int err)
{
	struct page *page = bio->bi_private;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
	/*
	 * This can race.  Recheck that log hasn't been set to null, and after
	 * acquiring logsync lock, recheck lsn
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio, int err)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
}
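
/*
 * Write out every dirty metapage in the page, coalescing physically
 * contiguous metapages into a single bio.  Metapages pinned by the
 * transaction manager (nohomeok) are skipped and the page is redirtied;
 * if the journal is idle it is kicked so the pin is eventually released.
 */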
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;

	page_start = (sector_t)page->index <<
		     (PAGE_CACHE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				set_bit(META_io, &mp->flag);
				continue;
			}
			/* Not contiguous */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(page);
			if (!bio->bi_size)
				goto dump_bio;
			submit_bio(WRITE, bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(page);
		xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			/* Need better error handling */
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			dec_io(page, last_write_complete);
			continue;
		}
		set_bit(META_io, &mp->flag);
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(GFP_NOFS, 1);
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_size)
			goto dump_bio;

		submit_bio(WRITE, bio);
		nr_underway++;
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);

	return -EIO;
}
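
/*
 * Read the page by issuing one bio per physically contiguous extent;
 * unmapped blocks are simply skipped.
 */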
static int metapage_readpage(struct file *fp, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_CACHE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(READ, bio);

			bio = bio_alloc(GFP_NOFS, 1);
			bio->bi_bdev = inode->i_sb->s_bdev;
			bio->bi_sector = pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(READ, bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}

static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct metapage *mp;
	int ret = 1;
	int offset;

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp)
			continue;

		jfs_info("metapage_releasepage: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = 0;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}

static void metapage_invalidatepage(struct page *page, unsigned long offset)
{
	BUG_ON(offset);

	BUG_ON(PageWriteback(page));

	metapage_releasepage(page, 0);
}

const struct address_space_operations jfs_metapage_aops = {
	.readpage	= metapage_readpage,
	.writepage	= metapage_writepage,
	.sync_page	= block_sync_page,
	.releasepage	= metapage_releasepage,
	.invalidatepage	= metapage_invalidatepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};
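
/*
 * Look up (or create) the metapage covering lblock.  'absolute' selects the
 * block device's direct mapping rather than the inode's own mapping; 'new'
 * means the caller will overwrite the data, so the buffer is zeroed and,
 * when possible, not read from disk at all.
 */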
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_CACHE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_CACHE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page) || !PageUptodate(page)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "__get_metapage: mp->logical_size != size");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "__get_metapage: using a "
					  "discarded metapage");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		mp->page = page;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}

void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	page_cache_get(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}

void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;
	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	page_cache_get(page);
	lock_page(page);
	set_page_dirty(page);
	write_one_page(page, 1);
	clear_bit(META_forcewrite, &mp->flag);
	page_cache_release(page);
}

void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	page_cache_get(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}
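
/*
 * Drop a reference to the metapage.  When the last reference goes away, a
 * dirty metapage is handed to the page cache for writeback (synchronously if
 * META_sync is set) and the metapage itself is freed if nothing else pins it.
 */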
void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		page_cache_release(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			write_one_page(page, 1);
			lock_page(page); /* write_one_page unlocks the page */
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);

	unlock_page(page);
	page_cache_release(page);
}
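
/*
 * Invalidate all metapages backed by the block-device range
 * [addr, addr + len): they are flagged for discard and detached from the
 * log sync list so they are never written back.
 */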
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_JFS_STATISTICS
static int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		   "JFS Metapage statistics\n"
		   "=======================\n"
		   "page allocations = %d\n"
		   "page frees = %d\n"
		   "lock waits = %d\n",
		   mpStat.pagealloc,
		   mpStat.pagefree,
		   mpStat.lockwait);
	return 0;
}

static int jfs_mpstat_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, jfs_mpstat_proc_show, NULL);
}

const struct file_operations jfs_mpstat_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= jfs_mpstat_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif