/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
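
/*
 * A rough sketch of the intended entry lifecycle, for orientation only
 * (error handling omitted; "cache", "bdev", "block" and "key" are
 * hypothetical caller-side values, and GFP_NOFS is just an example mask):
 *
 *	ce = mb_cache_entry_alloc(cache, GFP_NOFS);	// invalid, handle held
 *	mb_cache_entry_insert(ce, bdev, block, key);	// now valid (hashed)
 *	mb_cache_entry_release(ce);			// drop handle -> lru list
 *	...
 *	ce = mb_cache_entry_get(cache, bdev, block);	// look it up again
 *	if (ce)
 *		mb_cache_entry_release(ce);		// or mb_cache_entry_free()
 */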

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>


#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
#define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while(0)

#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
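
/*
 * Note on the locking encoding (inferred from the code below): e_used counts
 * the handles held on an entry. A shared ("reader") handle adds 1; an
 * exclusive ("writer") handle, as taken by mb_cache_entry_alloc() and
 * mb_cache_entry_get(), adds 1 + MB_CACHE_WRITER, so e_used >= MB_CACHE_WRITER
 * means a writer currently holds the entry. e_queued counts tasks sleeping on
 * mb_cache_queue waiting for the entry to become available.
 */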

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

/*
 * Global data: list of all mbcache's, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */
static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);

static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}


static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		list_del(&ce->e_index.o_list);
	}
}


static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued));
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);
}


static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
	__releases(mb_cache_spinlock)
{
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		mb_assert(list_empty(&ce->e_lru_list));
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}

/*
 * mb_cache_shrink_scan()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @shrink: (ignored)
 * @sc: shrink_control passed from reclaim
 *
 * Returns the number of objects freed.
 */
static unsigned long
mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(free_list);
	struct mb_cache_entry *entry, *tmp;
	int nr_to_scan = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;
	unsigned long freed = 0;

	mb_debug("trying to free %d entries", nr_to_scan);
	spin_lock(&mb_cache_spinlock);
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
		freed++;
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(entry, gfp_mask);
	}
	return freed;
}

static unsigned long
mb_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct mb_cache *cache;
	unsigned long count = 0;

	spin_lock(&mb_cache_spinlock);
	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	spin_unlock(&mb_cache_spinlock);

	return vfs_pressure_ratio(count);
}

static struct shrinker mb_cache_shrinker = {
	.count_objects = mb_cache_shrink_count,
	.scan_objects = mb_cache_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};
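
/*
 * The shrinker above is registered once in init_mbcache() below. Under memory
 * pressure the mm core calls .count_objects (mb_cache_shrink_count) to size
 * the reclaimable set and .scan_objects (mb_cache_shrink_scan) to evict up to
 * sc->nr_to_scan unused entries from the global lru list.
 */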

/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are equal size. Cache entries may be from
 * multiple devices. The new cache is added to the global list of caches;
 * the memory shrinker that reclaims unused entries is registered once at
 * module init. Returns NULL if no more memory was available.
 *
 * @name: name of the cache (informal)
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
	int n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->c_name = name;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_index_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_index_hash[n]);
	cache->c_entry_cache = kmem_cache_create(name,
		sizeof(struct mb_cache_entry), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!cache->c_entry_cache)
		goto fail2;

	/*
	 * Set an upper limit on the number of cache entries so that the hash
	 * chains won't grow too long.
	 */
	cache->c_max_entries = bucket_count << 4;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail2:
	kfree(cache->c_index_hash);

fail:
	kfree(cache->c_block_hash);
	kfree(cache);
	return NULL;
}
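
/*
 * A minimal creation sketch (illustrative only; "my_cache", the cache name
 * and the bucket count are hypothetical caller-side choices):
 *
 *	struct mb_cache *my_cache;
 *
 *	my_cache = mb_cache_create("my_xattr_cache", 6);	// 64 buckets
 *	if (!my_cache)
 *		return -ENOMEM;
 *	...
 *	mb_cache_destroy(my_cache);	// on shutdown/unload
 */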

/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. Cache entries
 * currently in use cannot be freed and thus remain in the cache; all
 * others are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}

/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. The cache is removed from the global cache list;
 * the memory shrinker itself is only unregistered at module exit.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	kfree(cache->c_index_hash);
	kfree(cache->c_block_hash);
	kfree(cache);
}

/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce = NULL;

	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
		/* Over the limit: try to reuse the oldest unused entry. */
		spin_lock(&mb_cache_spinlock);
		if (!list_empty(&mb_cache_lru_list)) {
			ce = list_entry(mb_cache_lru_list.next,
					struct mb_cache_entry, e_lru_list);
			list_del_init(&ce->e_lru_list);
			__mb_cache_entry_unhash(ce);
		}
		spin_unlock(&mb_cache_spinlock);
	}
	if (!ce) {
		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
		if (!ce)
			return NULL;
		atomic_inc(&cache->c_entry_count);
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		ce->e_queued = 0;
	}
	ce->e_used = 1 + MB_CACHE_WRITER;
	return ce;
}

/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block number exists
 * already (this may happen after a failed lookup, if another process has
 * inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @key: lookup key
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int key)
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	ce->e_index.o_key = key;
	bucket = hash_long(key, cache->c_bucket_bits);
	list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}
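
/*
 * A hedged usage sketch for the alloc/insert path (illustrative only;
 * "bdev", "block", "key" and fill_entry_data() are hypothetical caller-side
 * values/helpers, and GFP_NOFS is just an example mask):
 *
 *	ce = mb_cache_entry_alloc(cache, GFP_NOFS);
 *	if (!ce)
 *		return -ENOMEM;
 *	fill_entry_data(ce);
 *	error = mb_cache_entry_insert(ce, bdev, block, key);
 *	if (error == -EBUSY)
 *		mb_cache_entry_free(ce);	// another process won the race
 *	else
 *		mb_cache_entry_release(ce);	// drop our handle; entry goes to lru
 */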

/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_free()
 *
 * Unhashes (invalidates) the entry and releases the caller's handle to it,
 * so the entry is freed as soon as it is no longer in use.
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
}
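
/*
 * Usage note (inferred from the implementations above): call
 * mb_cache_entry_release() when you are merely done with a handle and the
 * cached block remains valid; call mb_cache_entry_free() when the cached
 * information is no longer valid (e.g. the block is being reused or
 * discarded), so the entry cannot be looked up again.
 */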

/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Wait until the entry is no longer in use. */
			while (ce->e_used > 0) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);
			ce->e_used += 1 + MB_CACHE_WRITER;

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				return NULL;
			}
			goto cleanup;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
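
/*
 * A minimal sketch of looking up an entry by block (illustrative; "bdev" and
 * "block" are hypothetical caller-side values). The handle returned by
 * mb_cache_entry_get() is exclusive, so it must be released or freed again:
 *
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce) {
 *		// ... inspect or update the cached information ...
 *		mb_cache_entry_free(ce);	// if the block is going away
 *		// or: mb_cache_entry_release(ce);
 *	}
 */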

#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_index.o_list);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			ce->e_used++;
			while (ce->e_used >= MB_CACHE_WRITER) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				spin_lock(&mb_cache_spinlock);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	return NULL;
}

/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found, or
 * ERR_PTR(-EAGAIN) if a matching entry was invalidated while waiting
 * for it to become available. The returned cache entry is locked for
 * shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
			  unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = cache->c_index_hash[bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
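
/*
 * A sketch of a reader loop over all entries with a given key (illustrative;
 * "bdev" and "key" are hypothetical). Since both find functions can also
 * return ERR_PTR(-EAGAIN), callers typically check IS_ERR() as well:
 *
 *	ce = mb_cache_entry_find_first(cache, bdev, key);
 *	while (ce && !IS_ERR(ce)) {
 *		// ... use the shared ("reader") handle ...
 *		ce = mb_cache_entry_find_next(ce, bdev, key);
 *	}
 */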

/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = prev->e_index.o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	__mb_cache_entry_release_unlock(prev);
	return ce;
}

#endif	/* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
	register_shrinker(&mb_cache_shrinker);
	return 0;
}

static void __exit exit_mbcache(void)
{
	unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)