/*
 * fs/f2fs/gc.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define GC_THREAD_MIN_WB_PAGES		1	/*
						 * a threshold to determine
						 * whether IO subsystem is idle
						 * or not
						 */
#define GC_THREAD_MIN_SLEEP_TIME	10000	/* milliseconds */
#define GC_THREAD_MAX_SLEEP_TIME	30000
#define GC_THREAD_NOGC_SLEEP_TIME	10000
#define LIMIT_INVALID_BLOCK		40	/* percentage over total user space */
#define LIMIT_FREE_BLOCK		40	/* percentage over invalid + free space */

/* Search max. number of dirty segments to select a victim segment */
#define MAX_VICTIM_SEARCH		20

enum {
	GC_NONE = 0,
	GC_ERROR,
	GC_OK,
	GC_NEXT,
	GC_BLOCKED,
	GC_DONE,
};

struct f2fs_gc_kthread {
	struct task_struct *f2fs_gc_task;
	wait_queue_head_t gc_wait_queue_head;
};

struct inode_entry {
	struct list_head list;
	struct inode *inode;
};

/*
 * inline functions
 */
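
/*
 * Blocks that remain available to user data: free segments beyond the
 * overprovisioned ones, converted from segments to blocks.
 */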
static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
{
	if (free_segments(sbi) < overprovision_segments(sbi))
		return 0;
	else
		return (free_segments(sbi) - overprovision_segments(sbi))
			<< sbi->log_blocks_per_seg;
}
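
/*
 * LIMIT_INVALID_BLOCK percent of the total user blocks: the amount of
 * invalid (garbage) blocks above which background GC becomes worthwhile.
 */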
static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
{
	return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
}
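
/*
 * LIMIT_FREE_BLOCK percent of the not-yet-written (invalid + free) blocks:
 * background GC runs only when the truly free space drops below this.
 */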
static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t reclaimable_user_blocks = sbi->user_block_count -
						written_block_count(sbi);
	return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}
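
/*
 * The GC thread lengthens or shortens its sleep interval in steps of
 * GC_THREAD_MIN_SLEEP_TIME, clamped between GC_THREAD_MIN_SLEEP_TIME and
 * GC_THREAD_MAX_SLEEP_TIME milliseconds.
 */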
static inline long increase_sleep_time(long wait)
{
	wait += GC_THREAD_MIN_SLEEP_TIME;
	if (wait > GC_THREAD_MAX_SLEEP_TIME)
		wait = GC_THREAD_MAX_SLEEP_TIME;
	return wait;
}

static inline long decrease_sleep_time(long wait)
{
	wait -= GC_THREAD_MIN_SLEEP_TIME;
	if (wait <= GC_THREAD_MIN_SLEEP_TIME)
		wait = GC_THREAD_MIN_SLEEP_TIME;
	return wait;
}

static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
	block_t invalid_user_blocks = sbi->user_block_count -
						written_block_count(sbi);
	/*
	 * Background GC is triggered with the following condition.
	 * 1. There are a number of invalid blocks.
	 * 2. There is not enough free space.
	 */
	if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
			free_user_blocks(sbi) < limit_free_user_blocks(sbi))
		return true;
	return false;
}
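
/*
 * The IO subsystem is considered idle when the backing device's request
 * queue has neither synchronous nor asynchronous requests pending.
 */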
static inline int is_idle(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	struct request_list *rl = &q->root_rl;
	return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
}
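
/*
 * A checkpoint is needed when the remaining free sections may not be
 * enough to hold the currently dirty node and dentry blocks plus a
 * small safety margin.
 */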
static inline bool should_do_checkpoint(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	return free_sections(sbi) <= (node_secs + 2 * dent_secs + 2);
}
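
/*
 * Usage illustration (not part of the original gc.h): a minimal sketch of
 * how a background GC kthread could combine the helpers above, assuming
 * <linux/kthread.h> and <linux/wait.h>. The real implementation lives in
 * fs/f2fs/gc.c; the function and field names below (example_gc_thread,
 * sbi->gc_thread) are illustrative assumptions, not the actual code.
 */
#if 0	/* illustration only */
static int example_gc_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	long wait_ms = GC_THREAD_MIN_SLEEP_TIME;

	do {
		/* sleep, but wake up early if the thread is being stopped */
		wait_event_interruptible_timeout(
				sbi->gc_thread->gc_wait_queue_head,
				kthread_should_stop(),
				msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		/* back off while the IO subsystem is busy */
		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(wait_ms);
			continue;
		}

		/* GC more aggressively only when enough garbage has piled up */
		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(wait_ms);
		else
			wait_ms = increase_sleep_time(wait_ms);

		/* ... run one background GC pass here ... */
	} while (!kthread_should_stop());
	return 0;
}
#endif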