/*
 * fs/f2fs/gc.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  11. #define GC_THREAD_NAME "f2fs_gc_task"
  12. #define GC_THREAD_MIN_WB_PAGES 1 /*
  13. * a threshold to determine
  14. * whether IO subsystem is idle
  15. * or not
  16. */
  17. #define GC_THREAD_MIN_SLEEP_TIME 10000 /* milliseconds */
  18. #define GC_THREAD_MAX_SLEEP_TIME 30000
  19. #define GC_THREAD_NOGC_SLEEP_TIME 10000
  20. #define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */
  21. #define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */
  22. /* Search max. number of dirty segments to select a victim segment */
  23. #define MAX_VICTIM_SEARCH 20
  24. enum {
  25. GC_NONE = 0,
  26. GC_ERROR,
  27. GC_OK,
  28. GC_NEXT,
  29. GC_BLOCKED,
  30. GC_DONE,
  31. };
  32. struct f2fs_gc_kthread {
  33. struct task_struct *f2fs_gc_task;
  34. wait_queue_head_t gc_wait_queue_head;
  35. };
  36. struct inode_entry {
  37. struct list_head list;
  38. struct inode *inode;
  39. };
/*
 * inline functions
 */
  43. static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
  44. {
  45. if (free_segments(sbi) < overprovision_segments(sbi))
  46. return 0;
  47. else
  48. return (free_segments(sbi) - overprovision_segments(sbi))
  49. << sbi->log_blocks_per_seg;
  50. }
  51. static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
  52. {
  53. return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
  54. }
  55. static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
  56. {
  57. block_t reclaimable_user_blocks = sbi->user_block_count -
  58. written_block_count(sbi);
  59. return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
  60. }
  61. static inline long increase_sleep_time(long wait)
  62. {
  63. wait += GC_THREAD_MIN_SLEEP_TIME;
  64. if (wait > GC_THREAD_MAX_SLEEP_TIME)
  65. wait = GC_THREAD_MAX_SLEEP_TIME;
  66. return wait;
  67. }
  68. static inline long decrease_sleep_time(long wait)
  69. {
  70. wait -= GC_THREAD_MIN_SLEEP_TIME;
  71. if (wait <= GC_THREAD_MIN_SLEEP_TIME)
  72. wait = GC_THREAD_MIN_SLEEP_TIME;
  73. return wait;
  74. }
  75. static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
  76. {
  77. block_t invalid_user_blocks = sbi->user_block_count -
  78. written_block_count(sbi);
  79. /*
  80. * Background GC is triggered with the following condition.
  81. * 1. There are a number of invalid blocks.
  82. * 2. There is not enough free space.
  83. */
  84. if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
  85. free_user_blocks(sbi) < limit_free_user_blocks(sbi))
  86. return true;
  87. return false;
  88. }
  89. static inline int is_idle(struct f2fs_sb_info *sbi)
  90. {
  91. struct block_device *bdev = sbi->sb->s_bdev;
  92. struct request_queue *q = bdev_get_queue(bdev);
  93. struct request_list *rl = &q->root_rl;
  94. return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
  95. }
  96. static inline bool should_do_checkpoint(struct f2fs_sb_info *sbi)
  97. {
  98. unsigned int pages_per_sec = sbi->segs_per_sec *
  99. (1 << sbi->log_blocks_per_seg);
  100. int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1)
  101. >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
  102. int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1)
  103. >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
  104. return free_sections(sbi) <= (node_secs + 2 * dent_secs + 2);
  105. }