/*
 * fs/f2fs/gc.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define GC_THREAD_MIN_WB_PAGES		1	/*
						 * a threshold to determine
						 * whether IO subsystem is idle
						 * or not
						 */
/* GC thread polling intervals, all in milliseconds */
#define GC_THREAD_MIN_SLEEP_TIME	30000	/* milliseconds */
#define GC_THREAD_MAX_SLEEP_TIME	60000
#define GC_THREAD_NOGC_SLEEP_TIME	300000	/* wait 5 min */
#define LIMIT_INVALID_BLOCK	40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK	40 /* percentage over invalid + free space */

/* Search max. number of dirty segments to select a victim segment */
#define MAX_VICTIM_SEARCH	20
/* Per-superblock state for the background garbage-collection thread. */
struct f2fs_gc_kthread {
	struct task_struct *f2fs_gc_task;	/* the background GC kernel thread */
	wait_queue_head_t gc_wait_queue_head;	/* used to wake/sleep the GC thread */
};
/* List node pairing an inode with a linked list it is queued on. */
struct inode_entry {
	struct list_head list;	/* link into the owning list */
	struct inode *inode;	/* the inode this entry refers to */
};
/*
 * inline functions
 */
  34. static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
  35. {
  36. if (free_segments(sbi) < overprovision_segments(sbi))
  37. return 0;
  38. else
  39. return (free_segments(sbi) - overprovision_segments(sbi))
  40. << sbi->log_blocks_per_seg;
  41. }
  42. static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
  43. {
  44. return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
  45. }
  46. static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
  47. {
  48. block_t reclaimable_user_blocks = sbi->user_block_count -
  49. written_block_count(sbi);
  50. return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
  51. }
  52. static inline long increase_sleep_time(long wait)
  53. {
  54. if (wait == GC_THREAD_NOGC_SLEEP_TIME)
  55. return wait;
  56. wait += GC_THREAD_MIN_SLEEP_TIME;
  57. if (wait > GC_THREAD_MAX_SLEEP_TIME)
  58. wait = GC_THREAD_MAX_SLEEP_TIME;
  59. return wait;
  60. }
  61. static inline long decrease_sleep_time(long wait)
  62. {
  63. if (wait == GC_THREAD_NOGC_SLEEP_TIME)
  64. wait = GC_THREAD_MAX_SLEEP_TIME;
  65. wait -= GC_THREAD_MIN_SLEEP_TIME;
  66. if (wait <= GC_THREAD_MIN_SLEEP_TIME)
  67. wait = GC_THREAD_MIN_SLEEP_TIME;
  68. return wait;
  69. }
  70. static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
  71. {
  72. block_t invalid_user_blocks = sbi->user_block_count -
  73. written_block_count(sbi);
  74. /*
  75. * Background GC is triggered with the following condition.
  76. * 1. There are a number of invalid blocks.
  77. * 2. There is not enough free space.
  78. */
  79. if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
  80. free_user_blocks(sbi) < limit_free_user_blocks(sbi))
  81. return true;
  82. return false;
  83. }
/*
 * Report whether the underlying block device looks idle: true when the
 * queue's root request_list has no requests outstanding in either the
 * sync or async direction.
 *
 * NOTE(review): this peeks at legacy single-queue block-layer internals
 * (q->root_rl and its BLK_RW_SYNC/BLK_RW_ASYNC counts); it presumably
 * predates blk-mq and would need rework on multiqueue kernels — confirm
 * against the target kernel version.
 */
static inline int is_idle(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	struct request_list *rl = &q->root_rl;

	return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
}