gc.h

/*
 * fs/f2fs/gc.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define GC_THREAD_MIN_WB_PAGES		1	/*
						 * a threshold to determine
						 * whether IO subsystem is idle
						 * or not
						 */
#define GC_THREAD_MIN_SLEEP_TIME	10000	/* milliseconds */
#define GC_THREAD_MAX_SLEEP_TIME	30000
#define GC_THREAD_NOGC_SLEEP_TIME	10000
#define LIMIT_INVALID_BLOCK	40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK	40 /* percentage over invalid + free space */

/* Search max. number of dirty segments to select a victim segment */
#define MAX_VICTIM_SEARCH	20

struct f2fs_gc_kthread {
	struct task_struct *f2fs_gc_task;
	wait_queue_head_t gc_wait_queue_head;
};

struct inode_entry {
	struct list_head list;
	struct inode *inode;
};

/*
 * inline functions
 */
static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
{
	if (free_segments(sbi) < overprovision_segments(sbi))
		return 0;
	else
		return (free_segments(sbi) - overprovision_segments(sbi))
			<< sbi->log_blocks_per_seg;
}
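
/*
 * Rough illustration (added note, not from the original source):
 * free_user_blocks() converts free segments beyond the overprovision area
 * into blocks by shifting. Assuming the typical f2fs layout of 4 KB blocks
 * and 2 MB segments, log_blocks_per_seg is 9, so 100 free segments above
 * the overprovision area would report 100 << 9 = 51200 free user blocks.
 */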

static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
{
	return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
}

static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t reclaimable_user_blocks = sbi->user_block_count -
						written_block_count(sbi);
	return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}
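
/*
 * Worked example (hypothetical numbers, added for illustration): with
 * user_block_count = 1,000,000 and 600,000 written blocks,
 * reclaimable_user_blocks = 400,000, so limit_free_user_blocks() returns
 * 400,000 * 40 / 100 = 160,000 while limit_invalid_user_blocks() returns
 * 1,000,000 * 40 / 100 = 400,000.
 */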

static inline long increase_sleep_time(long wait)
{
	wait += GC_THREAD_MIN_SLEEP_TIME;
	if (wait > GC_THREAD_MAX_SLEEP_TIME)
		wait = GC_THREAD_MAX_SLEEP_TIME;
	return wait;
}

static inline long decrease_sleep_time(long wait)
{
	wait -= GC_THREAD_MIN_SLEEP_TIME;
	if (wait <= GC_THREAD_MIN_SLEEP_TIME)
		wait = GC_THREAD_MIN_SLEEP_TIME;
	return wait;
}
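
/*
 * Illustration (added note): the sleep interval moves in steps of
 * GC_THREAD_MIN_SLEEP_TIME (10 s) and stays within
 * [GC_THREAD_MIN_SLEEP_TIME, GC_THREAD_MAX_SLEEP_TIME], e.g.
 * increase_sleep_time(): 10000 -> 20000 -> 30000 -> 30000 (capped), and
 * decrease_sleep_time(): 30000 -> 20000 -> 10000 -> 10000 (floored).
 */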

static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
	block_t invalid_user_blocks = sbi->user_block_count -
					written_block_count(sbi);
	/*
	 * Background GC is triggered only when both conditions hold:
	 * 1. there are enough invalid blocks, and
	 * 2. there is not enough free space.
	 */
	if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
			free_user_blocks(sbi) < limit_free_user_blocks(sbi))
		return true;
	return false;
}

static inline int is_idle(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	struct request_list *rl = &q->root_rl;

	return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
}
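
/*
 * Usage sketch (added note, not part of this header): the GC kthread in
 * gc.c presumably ties these helpers together along the following lines --
 * sleep, back off while the device is busy, and shorten the interval only
 * when background cleaning looks worthwhile. gc_th, wait_ms and f2fs_gc()
 * are assumed names from gc.c, not defined here.
 *
 *	long wait_ms = GC_THREAD_MIN_SLEEP_TIME;
 *
 *	do {
 *		wait_event_interruptible_timeout(gc_th->gc_wait_queue_head,
 *				kthread_should_stop(),
 *				msecs_to_jiffies(wait_ms));
 *		if (kthread_should_stop())
 *			break;
 *
 *		if (!is_idle(sbi)) {
 *			wait_ms = increase_sleep_time(wait_ms);
 *			continue;
 *		}
 *
 *		if (has_enough_invalid_blocks(sbi))
 *			wait_ms = decrease_sleep_time(wait_ms);
 *		else
 *			wait_ms = increase_sleep_time(wait_ms);
 *
 *		f2fs_gc(sbi);
 *	} while (!kthread_should_stop());
 */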